1 Introduction

This notebook contains analyses from the MEDAL study, assessing memory bias in remitted and depressed individuals. The pre-registration for the main analyses, as well as a preprint version of the accompanying article can be found here as well. The main aim of the current work was to examine how three groups (control, remitted, depressed) differ in terms of emotional memory dynamics in a real-life setting.

#Load packages 
Warning message:
In Sys.timezone() : unable to identify current timezone 'C':
please set environment variable 'TZ'
#renv::activate()
library(dplyr)

Attaching package: ‘dplyr’

The following objects are masked from ‘package:stats’:

    filter, lag

The following objects are masked from ‘package:base’:

    intersect, setdiff, setequal, union
library(janitor)

Attaching package: ‘janitor’

The following objects are masked from ‘package:stats’:

    chisq.test, fisher.test
library(readxl)
library(data.table)
data.table 1.14.8 using 4 threads (see ?getDTthreads).  Latest news: r-datatable.com

Attaching package: ‘data.table’

The following objects are masked from ‘package:dplyr’:

    between, first, last
library(ggplot2)
Learn more about the underlying theory at https://ggplot2-book.org/
library(psych)

Attaching package: ‘psych’

The following objects are masked from ‘package:ggplot2’:

    %+%, alpha
library(devtools)
Loading required package: usethis
library(pastecs)

Attaching package: ‘pastecs’

The following objects are masked from ‘package:data.table’:

    first, last

The following objects are masked from ‘package:dplyr’:

    first, last
library(cli)
library(lmerTest) #linear mixed models 
Loading required package: lme4
Loading required package: Matrix

Attaching package: ‘lmerTest’

The following object is masked from ‘package:lme4’:

    lmer

The following object is masked from ‘package:stats’:

    step
library(performance) #fits of residuals 
library(DescTools)
Registered S3 method overwritten by 'DescTools':
  method        from       
  print.palette wesanderson

Attaching package: ‘DescTools’

The following objects are masked from ‘package:psych’:

    AUC, ICC, SD

The following object is masked from ‘package:data.table’:

    %like%
library(tidyr) # Separate trigger column

Attaching package: ‘tidyr’

The following objects are masked from ‘package:Matrix’:

    expand, pack, unpack

The following object is masked from ‘package:pastecs’:

    extract
library(sjPlot) # for nice tables and interaction plots 
library(knitr) # to print the tables within notebook
library(foreach) #run parallel loops

Attaching package: ‘foreach’

The following object is masked from ‘package:DescTools’:

    %:%
library(parallel)
library(doParallel) #run parallel loops
Loading required package: iterators
library(ggpubr) # emmeans
library(mediation) #mediation analysis 
Loading required package: MASS

Attaching package: ‘MASS’

The following object is masked from ‘package:dplyr’:

    select

Loading required package: mvtnorm
Loading required package: sandwich
mediation: Causal Mediation Analysis
Version: 4.5.0


Attaching package: ‘mediation’

The following object is masked from ‘package:psych’:

    mediate
library(plotly) #for interactive plots

Attaching package: ‘plotly’

The following object is masked from ‘package:MASS’:

    select

The following object is masked from ‘package:ggplot2’:

    last_plot

The following object is masked from ‘package:stats’:

    filter

The following object is masked from ‘package:graphics’:

    layout
library(jtools) #for theme_apa

Attaching package: ‘jtools’

The following object is masked from ‘package:mvtnorm’:

    standardize

The following object is masked from ‘package:DescTools’:

    %nin%
library(JSmediation) #moderated mediation
library(wesanderson) 
source("functions.R")

2 Data

Here we load in our data and preprocess it in steps. We first need to separate the different surveys that were delivered to bring them into the same sampling times, merge in some descriptives we need as covariates, and estimate subject-centered and lagged variables for our models later.

  1. Load and clean data
# Load the data
MEDAL_ESM_compleet <- read_excel("data/MEDAL_ESM_compleet.xlsx")
# Move variables around and clean names
MEDAL_ESM_compleet= MEDAL_ESM_compleet %>%
  rename(id='Movisens-ID') %>% 
  clean_names() %>%
  rename(subjectcode = 'deelnemersnr') %>% 
  relocate('subjectcode', .after = 'id')
df_medal_try = MEDAL_ESM_compleet
  1. Split different EMA surveys we have
# Sleep EMA Qs
df_medal_sleep = df_medal_try %>% 
  filter(form=='Slaap') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, 14)) 
# Recent EMA Qs
df_medal_recent = df_medal_try %>% 
  filter(form=='Recent') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rec')))
# Remote EMA Qs
df_medal_remote = df_medal_try %>%
  filter(form=='Remote') %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rem')))
# Mastery EMA Qs
df_medal_mastery = df_medal_try %>%
  filter(form=='Mastery') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('master')))
# Standard Qs
df_medal_standard = df_medal_try %>%
  filter(form=='Standaard')  %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, (-contains('mastery') & -contains('rem') & -contains('rec')))) %>% 
  dplyr::select(-c(7,13,14))

# merge sleep and standard
df_medal_merged = merge(df_medal_sleep, df_medal_standard, by=c('id', 'trigger_counter', 'subjectcode'), suffixes = c("_sleep", ""), all=T )

# merge to recent, remote and mastery
df_medal_merged= merge(df_medal_merged, df_medal_recent, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_recent'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_remote, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_remote'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_mastery, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_mastery'), all = T )

# Fix start dates and times
df_medal_merged$form_start_date <- as.character(df_medal_merged$form_start_date)
df_medal_merged$form_start_date <- as.ITime(df_medal_merged$form_start_date)
df_medal_merged = df_medal_merged %>% 
  dplyr::mutate(form_start_date = ifelse(is.na(form_start_date), form_start_date_sleep, form_start_date))
  1. Add variables on Education, Gender, and Age
# Load in previous data for demographics (also contains EMA)
load("data/IPAQ_Maeve_Workspace.RData")
df_MEDAL <- df_MEDAL[-c(192), ]

# Select relevant demo info
df_MEDAL = df_MEDAL %>% 
  dplyr::select(1, 3, 5:7) %>% 
  rename('subjectcode' = 'subject', 'education' = 'Education3groups', 'n_episodes'='NumberEpisodes' )

# merge demo to full EMA
df_medal_merged =  merge(df_medal_merged, df_MEDAL, by = c('subjectcode'), all = T ) %>%
  clean_names() 

df_medal_merged$education = as.factor(df_medal_merged$education)
df_medal_merged$gender = as.factor(df_medal_merged$gender)

df_medal_merged = df_medal_merged %>% 
  relocate(c('id', 'gender', 'age', 'education'), .after = 'subjectcode') %>% 
  relocate ('age', .after = 'gender') %>% 
  relocate ('education', .after = 'age') %>% 
  relocate ('id', .before = 'subjectcode')
  1. Categorize the subjectcode into the three conditions (Depressed, Remitted, Control (Never-depressed))
df_medal_merged$condition = case_when(df_medal_merged$subjectcode >= 700 & df_medal_merged$subjectcode < 800 ~ "remitted", df_medal_merged$subjectcode >= 800 & df_medal_merged$subjectcode < 900 ~ "depressed", df_medal_merged$subjectcode >= 900 ~ "control")
df_medal_merged = df_medal_merged %>% relocate ('condition', .after = 'subjectcode' )
df_medal_merged$condition <- as.factor(df_medal_merged$condition)
  1. Calculate reaction time
df_medal_merged$rt_sleep = lubridate::as.difftime( df_medal_merged$form_finish_date_sleep - df_medal_merged$form_start_date_sleep)
  1. Separate time from when trigger was sent

df_medal_merged = df_medal_merged %>% 
  tidyr::separate(trigger, c('Random', 'Time', 'Trigger_Time_1', 'Trigger_Time_2'))  %>% 
  dplyr::mutate(trigger_time = paste(Trigger_Time_1, Trigger_Time_2, sep=':')) %>%#separate time from previous trigger
  dplyr::mutate(trigger = paste(Random, Time, trigger_time, sep=' ')) #bring back 'trigger' column 

#Turn numbers into time format
df_medal_merged$trigger_time <- as.ITime(df_medal_merged$trigger_time)
  1. Find the trigger number per day
#df_medal_merged$trigger_counter[is.na(df_medal_merged$trigger_time)] <- 1

df_medal_merged$trigger_number[is.na(df_medal_merged$trigger_time)] <- 1 
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("10:00:00"))& df_medal_merged$trigger_time < (as.ITime("12:00:00"))] <- 2
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("12:00:00"))& df_medal_merged$trigger_time < (as.ITime("14:00:00"))] <- 3
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("14:00:00"))& df_medal_merged$trigger_time < (as.ITime("16:00:00"))] <- 4
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("16:00:00"))& df_medal_merged$trigger_time < (as.ITime("18:00:00"))] <- 5
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("18:00:00"))& df_medal_merged$trigger_time < (as.ITime("20:00:00"))] <- 6
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("20:00:00"))& df_medal_merged$trigger_time < (as.ITime("23:00:01"))] <- 7

# df
df_medal_merged = df_medal_merged %>% 
  relocate ('trigger_number', .after = 'trigger_counter')%>% relocate('trigger_time', .before ='form_start_date') %>%
  relocate('trigger', .before ='trigger_time')
  1. Final cleaning of the dataset

#Create dataset where double morning list is removed 
df_medal_merged = df_medal_merged %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>%
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>%
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter))

# now we have the clean dataframe, remove duplicated coloumns
df_medal_clean = df_medal_merged %>%
  dplyr::group_by(subjectcode) %>% 
  distinct(trigger_counter, .keep_all = TRUE) %>% 
  dplyr::select (1:12, 17:60) %>% 
  dplyr::mutate(original_order = row_number()) %>% 
  relocate('original_order', .before = 'id')

#Set the order of the factor to control, remission, depression (for visual purposes)
df_medal_clean$condition <- factor(df_medal_clean$condition, levels=c('control', 'remitted', 'depressed' ), labels=c('control', 'remitted', 'depressed'))

# Create dummy variable 
df_medal_clean = df_medal_clean %>% 
  mutate(condition_dummy = case_when(condition == 'depressed'~ 3, condition == 'remitted'~ 2, condition == 'control' ~ 1))

df_medal_clean$condition_dummy = as.factor(df_medal_clean$condition_dummy)

#set weekday 
df_medal_clean$form_finish_date <- as.character(df_medal_clean$form_finish_date)
df_medal_clean$form_finish_date <- strptime(df_medal_clean$form_finish_date, format = '%Y-%m-%d %H:%M:%S')

df_medal_clean$finish_date <- as.Date(df_medal_clean$form_finish_date)
df_medal_clean$weekday <- as.integer(format(df_medal_clean$finish_date, '%w'))
df_medal_clean = df_medal_clean %>% relocate('weekday', .after = 'trigger_number')

#solve duplicate issue 
df_medal_duplicate = df_medal_clean %>% dplyr::group_by(subjectcode, weekday) %>% filter(duplicated(trigger_number))

  #since row 10 for subject 712 is a duplicate morning list
  df_medal_clean <- subset(df_medal_clean, !(subjectcode == 712 & original_order == 10))
  df_medal_clean = df_medal_clean %>% dplyr::group_by(subjectcode, weekday) %>% 
    mutate(temp = lead(trigger_number), 
           trigger_number = case_when(trigger_number == temp ~ (temp -1), TRUE ~ trigger_number)) %>%
    select(-temp)
  #Take the rows out that have 'NA'for the weekday since form wasn't finished and there is no data for PA, NA, or Memory
  df_medal_clean = df_medal_clean %>% filter(!is.na(weekday))
  df_medal_clean$original_order <- 1:nrow(df_medal_clean) 

# Create expanded df with all potential datapoints

df_medal_day = df_medal_clean %>% 
  dplyr::select('subjectcode', 'weekday', 'original_order') %>% 
  dplyr::mutate(weekday = as.numeric(weekday)) %>%
  dplyr::group_by(subjectcode) %>%
  dplyr:: distinct(weekday, .keep_all =T) %>% 
  dplyr::ungroup()

df_medal_day = df_medal_day %>% 
  slice(rep(1:n(), each = 7)) %>% 
  dplyr::mutate(trigger_number = rep(1:7, length.out=n())) %>% 
  dplyr::rename(order = original_order)


#merge with main dataframe
df_medal_clean =  merge(df_medal_day, df_medal_clean, by=c('subjectcode', 'trigger_number', 'weekday'), all= T) 
df_medal_clean = df_medal_clean %>% mutate(order = as.numeric(order))
df_medal_clean = arrange(df_medal_clean, order)
  1. Centre, scale, and average Variables

used_vars = c("neg_mood", "rec_mem_neg_mood", "rem_mem_neg_mood", "pos_mood", "rec_mem_pos_mood", "rem_mem_pos_mood")

#Centering & scaling 
df_medal_clean = df_medal_clean %>% 
  dplyr::group_by(subjectcode) %>%
  # Center and scale
  dplyr::mutate( across(used_vars, ~ (.x/10), .names = "{.col}" ),
                 across(used_vars, ~ mean(.x, na.rm=T), .names = "{.col}_m" ),
                 across(used_vars, ~ (.x - mean(.x, na.rm=T)), .names = "{.col}_c" ), 
                 across(paste0(used_vars, "_c"), ~ DescTools::Winsorize(.x, na.rm = T), .names = "{.col}")) %>%  
  ungroup() %>%
  # Rescale to positive
  mutate(across(c(paste0(used_vars,"_c")), ~ abs(min(.x, na.rm = T)) + 1 + .x, .names = "{.col}s"))
  1. Lag variables
# Remote 

df_medal_clean = df_medal_clean %>%
  dplyr::group_by(subjectcode) %>%
  mutate(rem_mem_pos_mood_lag = lead(rem_mem_pos_mood, n=7, order_by=subjectcode),
         rem_mem_neg_mood_lag = lead(rem_mem_neg_mood, n=7, order_by=subjectcode),
         pos_mood_lag_rem = lead(pos_mood, n=7, order_by=subjectcode),
         neg_mood_lag_rem = lead(neg_mood, n=7, order_by=subjectcode), 
         rem_mem_pos_mood_cs_lag = lead(rem_mem_pos_mood_cs, n=7, order_by=subjectcode),
         rem_mem_neg_mood_cs_lag = lead(rem_mem_neg_mood_cs, n=7, order_by=subjectcode),
         pos_mood_cs_lag_rem = lead(pos_mood_cs, n=7, order_by=subjectcode),
         neg_mood_cs_lag_rem = lead(neg_mood_cs, n=7, order_by=subjectcode),
         rem_mem_pos_mood_c_lag = lead(rem_mem_pos_mood_c, n=7, order_by=subjectcode),
         rem_mem_neg_mood_c_lag = lead(rem_mem_neg_mood_c, n=7, order_by=subjectcode),
         pos_mood_c_lag_rem = lead(pos_mood_c, n=7, order_by=subjectcode),
         neg_mood_c_lag_rem = lead(neg_mood_c, n=7, order_by=subjectcode)) %>% 
  filter(!is.na(original_order)) # filter out the NA rows for the lagging of recent memory 

# Recent
df_medal_clean = df_medal_clean %>% 
  dplyr::group_by(subjectcode) %>%
  mutate(rec_mem_pos_mood_lag = lead(rec_mem_pos_mood, n=1, order_by=subjectcode),
         rec_mem_neg_mood_lag = lead(rec_mem_neg_mood, n=1, order_by=subjectcode),
         pos_mood_lag_rec = lead(pos_mood, n=1, order_by=subjectcode),
         neg_mood_lag_rec = lead(neg_mood, n=1, order_by=subjectcode),
         rec_mem_pos_mood_cs_lag = lead(rec_mem_pos_mood_cs, n=1, order_by=subjectcode),
         rec_mem_neg_mood_cs_lag = lead(rec_mem_neg_mood_cs, n=1, order_by=subjectcode),
         pos_mood_cs_lag_rec = lead(pos_mood_cs, n=1, order_by=subjectcode),
         neg_mood_cs_lag_rec = lead(neg_mood_cs, n=1, order_by=subjectcode),
         rec_mem_pos_mood_c_lag = lead(rec_mem_pos_mood_c, n=1, order_by=subjectcode),
         rec_mem_neg_mood_c_lag = lead(rec_mem_neg_mood_c, n=1, order_by=subjectcode),
         pos_mood_c_lag_rec = lead(pos_mood_c, n=1, order_by=subjectcode),
         neg_mood_c_lag_rec = lead(neg_mood_c, n=1, order_by=subjectcode)) %>% 
  ungroup()
  

3 Descriptives

We first plot some general population descriptives, followed by some descriptives of the scales we use in the EMA weeks and compliance rates.

Population Descriptives

#Create Descriptives Tables
summary_table = df_medal_clean %>% distinct(subjectcode, .keep_all = TRUE) %>% group_by(condition) %>% summarise(
  'N' = n(),
  'Age \n M +/- SD' = paste0(round(mean(age, na.rm =T), 1), ' +/- ' , round(sd(age, na.rm =T), 1)),
  #'NA Age' = sum(is.na(age)), 
  '% Education Low' = mean(education == 0, na.rm = T)*100,
  '% Education Middle' = mean(education == 1, na.rm = T)*100,
  '% Education High' = mean(education == 2, na.rm = T)*100,
  #'NA Education' = sum(is.na(education)), 
  '% Female' = mean(gender ==1, na.rm = T)*100,
  #'NA Gender' = sum(is.na(gender))
)

#Compliance rates
df_medal_compliance_individual = df_medal_clean %>% group_by(condition, subjectcode) %>% summarise('ComplianceRate' = sum(!is.na(trigger_number))/42*100)
`summarise()` has grouped output by 'condition'. You can override using the `.groups` argument.
length(which(df_medal_compliance_individual$ComplianceRate <70))
[1] 8
df_medal_compliance = df_medal_clean %>% group_by(condition) %>% summarise('% Mean Compliance' = sum(!is.na(trigger_number))/(n_distinct(subjectcode) *42)*100) 


# Create a table
summary_table = summary_table %>% left_join(df_medal_compliance, by = 'condition')
rempsyc::nice_table(summary_table)

condition

N

Age
M +/- SD

% Education Low

% Education Middle

% Education High

% Female

% Mean Compliance

control

55

51.7 +/- 10.9

0.00

22.64

77.36

78.18

92.42

remitted

90

52.5 +/- 11.1

3.49

16.28

80.23

78.41

91.88

depressed

46

48.7 +/- 11.2

4.35

30.43

65.22

71.74

92.65



#create dataset with only unique subjectcode 
df_medal_distinct = df_medal_clean %>% distinct(subjectcode, .keep_all = TRUE)%>% select('subjectcode', 'condition', 'age', 'education', 'gender')

#Test for differences between groups 
  #Age
  df_medal_distinct %>% lm(age ~ condition, data=.) %>% summary()

Call:
lm(formula = age ~ condition, data = .)

Residuals:
    Min      1Q  Median      3Q     Max 
-26.717  -6.511   2.489   7.489  18.283 

Coefficients:
                   Estimate Std. Error t value Pr(>|t|)    
(Intercept)         51.6727     1.4876  34.736   <2e-16 ***
conditionremitted    0.8386     1.8963   0.442    0.659    
conditiondepressed  -2.9553     2.2042  -1.341    0.182    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 11.03 on 186 degrees of freedom
  (2 observations deleted due to missingness)
Multiple R-squared:  0.0192,    Adjusted R-squared:  0.008657 
F-statistic: 1.821 on 2 and 186 DF,  p-value: 0.1648
  
  #education
  chisq.test(df_medal_distinct$education, df_medal_distinct$condition)
Warning: Chi-squared approximation may be incorrect

    Pearson's Chi-squared test

data:  df_medal_distinct$education and df_medal_distinct$condition
X-squared = 5.8234, df = 4, p-value = 0.2127
  
  #gender
  chisq.test(df_medal_distinct$gender, df_medal_distinct$condition)

    Pearson's Chi-squared test

data:  df_medal_distinct$gender and df_medal_distinct$condition
X-squared = 0.84533, df = 2, p-value = 0.6553
  
#Create a Graph  
  #age
  boxplot_age <- ggplot(df_medal_distinct, aes(x=condition, y=age, fill=condition)) +
    geom_boxplot() +
    labs(x = 'Condition', y = 'Age', tag = 'C' ) +
    ggtitle('Age per Condition') +
    scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
    theme_apa() 
  ggplotly(boxplot_age)
Warning: Removed 2 rows containing non-finite values (`stat_boxplot()`).
    
    #education
    plot_educ <- ggplot(df_medal_distinct, aes(x=condition, fill = education)) +
      geom_bar(position = 'dodge') +
      labs(x='Condition', y= 'Proportion', tag = 'A' ) +
      ggtitle('Education Level per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Low' , 'Middle' , 'High'), name = 'Education')  +
      theme_apa() 
    ggplotly(plot_educ)
      
    #Gender
    plot_gender <- ggplot(df_medal_distinct, aes(x=condition, fill = gender)) +
      geom_bar(position = 'dodge') +
      labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
      ggtitle('Gender per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
      theme_apa() 
    ggplotly(plot_gender)
      
     plot_gender_2 <-  ggstatsplot::ggbarstats(data = df_medal_distinct, x = gender, y = condition) +
        labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
        ggtitle('Gender per Condition') +
        scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
        theme_apa() 
Scale for fill is already present.
Adding another scale for fill, which will replace the existing scale.
      ggplotly(plot_gender_2)
Warning: geom_GeomLabel() has yet to be implemented in plotly.
  If you'd like to see this geom implemented,
  Please open an issue with your example code at
  https://github.com/ropensci/plotly/issues
ggpubr::ggarrange(plot_educ, plot_gender, boxplot_age, nrow=2, ncol=2)    
Warning: Removed 2 rows containing non-finite values (`stat_boxplot()`).

NA

Positive Mood

describe.by(df_medal_clean$pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
Warning: describe.by is deprecated.  Please use the describeBy function

Distribution

ggplot(data= df_medal_clean, aes(x=pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))

Centred Distribution

ggplot(data= df_medal_clean, aes(x=pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))

Positive Recent Memory

describe.by(df_medal_clean$rec_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
Warning: describe.by is deprecated.  Please use the describeBy function

Distribution

ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))

Centred Distribution

ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))

Positive Remote Memory

describe.by(df_medal_clean$rem_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
Warning: describe.by is deprecated.  Please use the describeBy function

Distribution

ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))

Centred Distribution

ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))

Negative Mood

describe.by(df_medal_clean$neg_mood, group=df_medal_clean$condition)%>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
Warning: describe.by is deprecated.  Please use the describeBy function

Distribution

ggplot(data= df_medal_clean, aes(x=neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))

Centred Distribution

ggplot(data= df_medal_clean, aes(x=neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))

ggplot(data= df_medal_clean, aes(x=neg_mood_cs)) + geom_histogram()+facet_grid(cols=vars(condition))

Negative Recent Memory

describe.by(df_medal_clean$rec_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
Warning: describe.by is deprecated.  Please use the describeBy function

Distribution

ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))

Centred Distribution

ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))

Negative Remote Memory

describe.by(df_medal_clean$rem_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
Warning: describe.by is deprecated.  Please use the describeBy function

Distribution

ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))

Centred Distribution

ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))

4 Main Effects

A simple linear regression analysis was done to look at the differences in positive and negative mood ratings per condition.

4.1 Linear Mixed Models for PA and NA

#positive mood 
positive_model = lmer(pos_mood ~ condition + (1|subjectcode), data = df_medal_clean)


#negative mood
negative_model = lmer(neg_mood ~ condition + (1|subjectcode), data = df_medal_clean)



asis_output(tab_model (positive_model, negative_model, 
                      show.se = T, show.df = T, show.aic = T, transform = NULL,  
                      show.stat = T, show.std = T, 
                      title = 'Condition Differences Mood Rating', dv.labels = c('PA Scaled',  'NA Scaled') )$knitr)
Condition Differences Mood Rating
  PA Scaled NA Scaled
Predictors Estimates std. Error std. Beta standardized std. Error CI standardized CI Statistic p df Estimates std. Error std. Beta standardized std. Error CI standardized CI Statistic p df
(Intercept) 7.49 0.17 0.56 0.08 7.15 – 7.83 0.40 – 0.71 43.49 <0.001 7381.00 1.14 0.20 -0.63 0.08 0.74 – 1.54 -0.79 – -0.47 5.63 <0.001 7377.00
condition [remitted] -1.12 0.22 -0.51 0.10 -1.55 – -0.69 -0.71 – -0.32 -5.13 <0.001 7381.00 1.43 0.26 0.58 0.10 0.93 – 1.93 0.38 – 0.78 5.57 <0.001 7377.00
condition [depressed] -2.90 0.26 -1.32 0.12 -3.40 – -2.40 -1.55 – -1.10 -11.38 <0.001 7381.00 3.68 0.30 1.49 0.12 3.10 – 4.27 1.26 – 1.73 12.29 <0.001 7377.00
Random Effects
σ2 2.12 2.12
τ00 1.58 subjectcode 2.20 subjectcode
ICC 0.43 0.51
N 191 subjectcode 191 subjectcode
Observations 7386 7382
Marginal R2 / Conditional R2 0.233 / 0.560 0.295 / 0.653
AIC 27174.561 27219.514

Plot


#create boxplot for negative and positive affect for each group with significance levels 
combine_data = df_medal_clean %>%
  tidyr::pivot_longer(cols=c(pos_mood, neg_mood), names_to = 'mood_type', values_to = 'mood_rating') %>% 
  dplyr::mutate(mood_type = factor(mood_type, levels = c('pos_mood' , 'neg_mood')))

# Plot
lm_plot = ggplot(combine_data, aes(x=condition, y = mood_rating, fill= condition)) + 
  geom_boxplot() +
  labs(x ='Group', y ='Affect (scaled units)' ) +
  scale_x_discrete(labels = c('control' = 'Never Depressed', 'remitted' = 'Remitted', 'depressed' = 'Depressed' )) +
  facet_wrap( ~ mood_type, ncol =3, nrow = 2, labeller  = labeller(mood_type = c('pos_mood' = 'Positive Affect', 'neg_mood'  = 'Negative Affect'))) +
  geom_signif(comparisons = list(c('control', 'remitted'), c('control' , 'depressed'), c('remitted' , 'depressed' )), map_signif_level = T, y_position = c(15, 13, 11)) +
  scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  theme_apa() + 
  guides(fill=F)
ggplotly(lm_plot)
ggsave('plot_simple_regression.pdf', dpi = 320, width = 12, height = 8, path = "figures/")

Follow-Up

emmeans::emmeans(positive_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
$emmeans
 condition emmean    SE  df lower.CL upper.CL
 control     7.49 0.172 188     7.15     7.83
 remitted    6.37 0.135 188     6.10     6.63
 depressed   4.59 0.188 187     4.21     4.96

Degrees-of-freedom method: kenward-roger 
Confidence level used: 0.95 

$contrasts
 contrast             estimate    SE  df t.ratio p.value
 control - remitted       1.12 0.219 188   5.129  <.0001
 control - depressed      2.90 0.255 188  11.381  <.0001
 remitted - depressed     1.78 0.232 188   7.696  <.0001

Degrees-of-freedom method: kenward-roger 
P value adjustment: tukey method for comparing a family of 3 estimates 
emmeans::emmeans(negative_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
$emmeans
 condition emmean    SE  df lower.CL upper.CL
 control     1.14 0.202 188     0.74     1.54
 remitted    2.57 0.158 188     2.26     2.88
 depressed   4.82 0.221 188     4.39     5.26

Degrees-of-freedom method: kenward-roger 
Confidence level used: 0.95 

$contrasts
 contrast             estimate    SE  df t.ratio p.value
 control - remitted      -1.43 0.257 188  -5.571  <.0001
 control - depressed     -3.68 0.300 188 -12.290  <.0001
 remitted - depressed    -2.25 0.272 188  -8.282  <.0001

Degrees-of-freedom method: kenward-roger 
P value adjustment: tukey method for comparing a family of 3 estimates 

5 Hypothesis 1: Independent Models

In this section we test the first models from hypothesis 1 as stated in the pre-registraiton. These analyses look at the relationship between recent and remote emotional memory for both positive and negative affect, while also looking at group differences in these relaitonships based on depression status.

# set model families
model_families = c('gauss-link', "gauss-log", 'gauss-inv', 'Gamma-link', 'Gamma-log')
# detect cores for later
ncores = detectCores()-1

Positive Affect

First we run the positive affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects.

Recent

# Model equation
h1_post_recent_eq = "pos_mood_cs ~ 1 + condition*rec_mem_pos_mood_cs_lag + gender + age + education + (1 + rec_mem_pos_mood_cs_lag|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_pos_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'), .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_post_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_pos_recent_models,  dv.labels = names(h1_pos_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.478 0.378 3.737 – 5.219 <0.001 1.595 0.053 1.491 – 1.700 <0.001 0.191 0.007 0.177 – 0.205 <0.001 4.477 0.373 3.746 – 5.208 <0.001 1.593 0.054 1.488 – 1.698 <0.001
condition [remitted] -0.744 0.413 -1.554 – 0.067 0.072 -0.096 0.059 -0.212 – 0.020 0.103 0.012 0.008 -0.004 – 0.028 0.133 -0.803 0.399 -1.585 – -0.022 0.044 -0.113 0.058 -0.227 – 0.002 0.054
condition [depressed] -1.176 0.456 -2.071 – -0.281 0.010 -0.148 0.065 -0.275 – -0.020 0.023 0.017 0.009 0.000 – 0.035 0.047 -1.253 0.441 -2.117 – -0.389 0.004 -0.170 0.065 -0.296 – -0.043 0.009
rec mem pos mood cs lag 0.407 0.048 0.313 – 0.501 <0.001 0.057 0.007 0.044 – 0.070 <0.001 -0.008 0.001 -0.010 – -0.006 <0.001 0.405 0.047 0.313 – 0.496 <0.001 0.057 0.007 0.044 – 0.071 <0.001
gender [1] 0.014 0.048 -0.080 – 0.108 0.766 0.000 0.007 -0.012 – 0.013 0.946 0.000 0.001 -0.002 – 0.002 0.898 0.014 0.050 -0.085 – 0.112 0.786 -0.000 0.007 -0.014 – 0.014 0.999
age 0.001 0.002 -0.003 – 0.004 0.750 -0.000 0.000 -0.001 – 0.000 0.961 0.000 0.000 -0.000 – 0.000 0.774 0.001 0.002 -0.003 – 0.005 0.661 0.000 0.000 -0.001 – 0.001 0.958
education [1] -0.008 0.131 -0.265 – 0.249 0.952 -0.002 0.018 -0.037 – 0.033 0.905 0.000 0.002 -0.004 – 0.005 0.862 -0.011 0.137 -0.281 – 0.258 0.934 -0.004 0.019 -0.041 – 0.033 0.832
education [2] 0.018 0.126 -0.228 – 0.265 0.884 0.001 0.017 -0.032 – 0.035 0.931 -0.000 0.002 -0.005 – 0.005 0.967 0.019 0.132 -0.240 – 0.277 0.887 0.000 0.018 -0.035 – 0.036 0.991
condition [remitted] *
rec mem pos mood cs lag
0.103 0.059 -0.012 – 0.218 0.078 0.013 0.008 -0.003 – 0.029 0.112 -0.002 0.001 -0.004 – 0.001 0.143 0.112 0.057 0.001 – 0.224 0.048 0.015 0.008 -0.001 – 0.031 0.060
condition [depressed] *
rec mem pos mood cs lag
0.159 0.065 0.032 – 0.286 0.014 0.019 0.009 0.002 – 0.037 0.033 -0.002 0.001 -0.005 – 0.000 0.066 0.171 0.063 0.048 – 0.294 0.007 0.022 0.009 0.005 – 0.040 0.013
Random Effects
σ2 1.21 1.20 1.22 0.02 0.02
τ00 2.03 subjectcode 0.04 subjectcode 0.00 subjectcode 1.87 subjectcode 0.04 subjectcode
τ11 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
ICC       0.72  
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3083 3083 3083 3083 3083
Marginal R2 / Conditional R2 0.267 / NA 0.007 / NA 0.000 / NA 0.837 / 0.955 0.268 / NA
AIC 9508.589 9475.910 9495.427 9815.591 9825.604
summary(h1_pos_recent_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ 1 + condition * rec_mem_pos_mood_cs_lag + gender +      age + education + (1 + rec_mem_pos_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9475.9   9560.4  -4724.0   9447.9     3069 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-6.0700 -0.5627  0.0787  0.5945  5.5864 

Random effects:
 Groups      Name                    Variance  Std.Dev. Corr 
 subjectcode (Intercept)             0.0412740 0.20316       
             rec_mem_pos_mood_cs_lag 0.0007971 0.02823  -1.00
 Residual                            1.2048875 1.09767       
Number of obs: 3083, groups:  subjectcode, 185

Fixed effects:
                                             Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 1.595e+00  5.317e-02  30.001   <2e-16 ***
conditionremitted                          -9.617e-02  5.902e-02  -1.630   0.1032    
conditiondepressed                         -1.477e-01  6.507e-02  -2.270   0.0232 *  
rec_mem_pos_mood_cs_lag                     5.726e-02  6.711e-03   8.533   <2e-16 ***
gender1                                     4.388e-04  6.526e-03   0.067   0.9464    
age                                        -1.244e-05  2.530e-04  -0.049   0.9608    
education1                                 -2.128e-03  1.792e-02  -0.119   0.9055    
education2                                  1.481e-03  1.718e-02   0.086   0.9313    
conditionremitted:rec_mem_pos_mood_cs_lag   1.309e-02  8.226e-03   1.591   0.1116    
conditiondepressed:rec_mem_pos_mood_cs_lag  1.932e-02  9.036e-03   2.137   0.0326 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.739                                                              
cndtndprssd  -0.672  0.601                                                       
rc_mm_ps___  -0.894  0.809  0.734                                                
gender1      -0.110 -0.021 -0.007 -0.031                                         
age          -0.276 -0.007  0.019 -0.007  0.138                                  
education1   -0.315  0.015 -0.004 -0.010  0.075  0.035                           
education2   -0.342  0.024  0.007  0.002  0.014  0.075  0.925                    
cndtnr:_____  0.733 -0.994 -0.599 -0.816  0.022  0.007 -0.003 -0.015             
cndtnd:_____  0.664 -0.601 -0.993 -0.742  0.015 -0.004  0.013  0.005  0.605      
plot_diagnostics(h1_pos_recent_models)

car::vif(h1_pos_recent_models[[2]]) #Vifs were inspected without interaction and deemed OK
                                         GVIF Df GVIF^(1/(2*Df))
condition                         5796.333348  2        8.725460
rec_mem_pos_mood_cs_lag              4.225765  1        2.055667
gender                               1.058505  1        1.028837
age                                  1.058709  1        1.028936
education                            1.079972  2        1.019420
condition:rec_mem_pos_mood_cs_lag 6072.496432  2        8.827583
Follow-up

In the follow-up we look at the pairwise differences in the slopes, and the indiivuds

emmeans::emtrends(h1_pos_recent_models[[2]], pairwise ~ condition, var='rec_mem_pos_mood_cs_lag', lmerTest.limit = 3500, pbkrtest.limit = 3500 )
$emtrends
 condition rec_mem_pos_mood_cs_lag.trend      SE  df asymp.LCL asymp.UCL
 control                          0.0573 0.00671 Inf    0.0441    0.0704
 remitted                         0.0703 0.00476 Inf    0.0610    0.0797
 depressed                        0.0766 0.00606 Inf    0.0647    0.0884

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast             estimate      SE  df z.ratio p.value
 control - remitted   -0.01309 0.00823 Inf  -1.591  0.2495
 control - depressed  -0.01932 0.00904 Inf  -2.137  0.0824
 remitted - depressed -0.00623 0.00770 Inf  -0.809  0.6976

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 
#create separate dataframes 
df_medal_clean_control = df_medal_clean %>% filter(condition == 'control')
df_medal_clean_remitted = df_medal_clean %>% filter(condition == 'remitted')
df_medal_clean_depressed = df_medal_clean %>% filter(condition == 'depressed') 


m1_pos_rec_control <-   lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education 
                     + (1  +  rec_mem_pos_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))
boundary (singular) fit: see help('isSingular')
m1_pos_rec_remitted <-   lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education 
                     + (1 + rec_mem_pos_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_remitted,
                   control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))
boundary (singular) fit: see help('isSingular')
m1_pos_rec_depressed <-   lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education 
                     + (1  + rec_mem_pos_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_depressed,
                   control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))
boundary (singular) fit: see help('isSingular')
asis_output(tab_model(m1_pos_rec_control, m1_pos_rec_remitted, m1_pos_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.41 0.39 3.65 – 5.18 <0.001 3.76 0.34 3.09 – 4.42 <0.001 3.34 0.42 2.51 – 4.17 <0.001
rec mem pos mood cs lag 0.40 0.05 0.31 – 0.50 <0.001 0.51 0.03 0.44 – 0.58 <0.001 0.56 0.04 0.48 – 0.64 <0.001
gender [1] -0.05 0.08 -0.21 – 0.10 0.515 0.00 0.08 -0.14 – 0.15 0.955 0.11 0.10 -0.09 – 0.31 0.277
age 0.00 0.00 -0.00 – 0.01 0.390 0.00 0.00 -0.00 – 0.01 0.752 -0.00 0.00 -0.01 – 0.01 0.529
education [2] 0.05 0.08 -0.11 – 0.20 0.565 -0.01 0.16 -0.33 – 0.30 0.938 0.09 0.23 -0.37 – 0.55 0.692
education [1] -0.03 0.17 -0.37 – 0.31 0.849 0.06 0.24 -0.42 – 0.54 0.802
Random Effects
σ2 0.94 1.20 1.53
τ00 2.44 subjectcode 2.33 subjectcode 0.95 subjectcode
τ11 0.05 subjectcode.rec_mem_pos_mood_cs_lag 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.02 subjectcode.rec_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
N 53 subjectcode 86 subjectcode 46 subjectcode
Observations 869 1451 763
Marginal R2 / Conditional R2 0.187 / NA 0.275 / NA 0.299 / NA
AIC 2488.394 4483.637 2544.869

Remote

# Model equation
h1_pos_remote_eq = "pos_mood_cs ~ 1 +  condition*rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_pos_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_pos_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_pos_remote_models, dv.labels = names(h1_pos_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.225 0.747 3.759 – 6.690 <0.001 1.711 0.101 1.513 – 1.909 <0.001 0.174 0.013 0.148 – 0.200 <0.001 5.408 0.808 3.821 – 6.995 <0.001 1.741 0.113 1.519 – 1.964 <0.001
condition [remitted] -0.423 0.778 -1.950 – 1.104 0.587 -0.062 0.105 -0.269 – 0.145 0.554 0.009 0.014 -0.018 – 0.036 0.519 -0.681 0.828 -2.307 – 0.944 0.411 -0.133 0.117 -0.363 – 0.098 0.259
condition [depressed] -0.446 0.825 -2.066 – 1.174 0.589 -0.060 0.112 -0.279 – 0.159 0.591 0.009 0.015 -0.020 – 0.037 0.555 -0.662 0.885 -2.399 – 1.074 0.454 -0.083 0.125 -0.329 – 0.162 0.505
rem mem pos mood cs lag 0.349 0.109 0.134 – 0.563 0.001 0.047 0.015 0.018 – 0.075 0.001 -0.006 0.002 -0.010 – -0.002 0.001 0.307 0.117 0.078 – 0.536 0.009 0.042 0.016 0.010 – 0.074 0.010
gender [1] -0.104 0.101 -0.302 – 0.095 0.305 -0.015 0.014 -0.041 – 0.012 0.275 0.002 0.002 -0.002 – 0.006 0.288 -0.119 0.118 -0.350 – 0.112 0.313 -0.021 0.016 -0.053 – 0.010 0.187
age 0.001 0.004 -0.007 – 0.009 0.768 0.000 0.001 -0.001 – 0.001 0.807 -0.000 0.000 -0.000 – 0.000 0.780 0.003 0.005 -0.006 – 0.012 0.566 0.000 0.001 -0.001 – 0.001 0.873
education [1] 0.032 0.293 -0.543 – 0.608 0.912 0.004 0.040 -0.075 – 0.083 0.923 -0.001 0.006 -0.012 – 0.010 0.909 -0.015 0.332 -0.667 – 0.637 0.963 -0.000 0.046 -0.091 – 0.091 0.998
education [2] 0.092 0.283 -0.463 – 0.647 0.745 0.012 0.039 -0.064 – 0.088 0.760 -0.002 0.005 -0.012 – 0.009 0.752 0.054 0.320 -0.574 – 0.682 0.867 0.007 0.045 -0.080 – 0.095 0.868
condition [remitted] *
rem mem pos mood cs lag
0.088 0.131 -0.168 – 0.344 0.500 0.013 0.017 -0.022 – 0.047 0.471 -0.002 0.002 -0.006 – 0.003 0.433 0.137 0.141 -0.139 – 0.414 0.330 0.025 0.020 -0.014 – 0.063 0.207
condition [depressed] *
rem mem pos mood cs lag
0.087 0.138 -0.185 – 0.358 0.531 0.011 0.018 -0.025 – 0.047 0.542 -0.002 0.002 -0.006 – 0.003 0.509 0.132 0.151 -0.164 – 0.428 0.381 0.016 0.021 -0.025 – 0.057 0.446
Random Effects
σ2 1.33 1.33 1.37 0.02 0.02
τ00 3.96 subjectcode 0.07 subjectcode 0.00 subjectcode 6.47 subjectcode 0.13 subjectcode
τ11 0.11 subjectcode.rem_mem_pos_mood_cs_lag 0.00 subjectcode.rem_mem_pos_mood_cs_lag 0.00 subjectcode.rem_mem_pos_mood_cs_lag 0.18 subjectcode.rem_mem_pos_mood_cs_lag 0.00 subjectcode.rem_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -0.99 subjectcode -0.99 subjectcode
ICC       0.95 0.25
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 774 774 774 774 774
Marginal R2 / Conditional R2 0.159 / NA 0.003 / NA 0.000 / NA 0.380 / 0.966 0.144 / 0.355
AIC 2534.494 2518.762 2523.992 2499.565 2515.410
summary(h1_pos_remote_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ 1 + condition * rem_mem_pos_mood_cs_lag + gender +      age + education + (1 + rem_mem_pos_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2518.8   2583.9  -1245.4   2490.8      760 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0388 -0.5322  0.1286  0.5985  3.4050 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr 
 subjectcode (Intercept)             0.066989 0.25882       
             rem_mem_pos_mood_cs_lag 0.001771 0.04209  -1.00
 Residual                            1.334781 1.15533       
Number of obs: 774, groups:  subjectcode, 184

Fixed effects:
                                             Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 1.7108232  0.1008706  16.961  < 2e-16 ***
conditionremitted                          -0.0623392  0.1053769  -0.592  0.55413    
conditiondepressed                         -0.0600107  0.1116406  -0.538  0.59090    
rem_mem_pos_mood_cs_lag                     0.0467398  0.0146023   3.201  0.00137 ** 
gender1                                    -0.0148265  0.0135729  -1.092  0.27467    
age                                         0.0001301  0.0005332   0.244  0.80720    
education1                                  0.0038793  0.0400996   0.097  0.92293    
education2                                  0.0118026  0.0386561   0.305  0.76012    
conditionremitted:rem_mem_pos_mood_cs_lag   0.0126092  0.0174900   0.721  0.47095    
conditiondepressed:rem_mem_pos_mood_cs_lag  0.0112342  0.0184320   0.609  0.54220    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.728                                                              
cndtndprssd  -0.695  0.654                                                       
rm_mm_ps___  -0.857  0.826  0.780                                                
gender1      -0.122 -0.030 -0.022 -0.040                                         
age          -0.305 -0.011  0.023 -0.005  0.137                                  
education1   -0.371  0.015  0.011 -0.013  0.086  0.022                           
education2   -0.402  0.028  0.024  0.002  0.025  0.061  0.934                    
cndtnr:_____  0.720 -0.992 -0.651 -0.835  0.032  0.012 -0.003 -0.018             
cndtnd:_____  0.683 -0.655 -0.990 -0.792  0.033  0.001 -0.001 -0.010  0.661      
plot_diagnostics(h1_pos_remote_models)

car::vif(h1_pos_remote_models[[2]]) #VIFs without interactions were fine
                                         GVIF Df GVIF^(1/(2*Df))
condition                         2527.565511  2        7.090479
rem_mem_pos_mood_cs_lag              4.999326  1        2.235917
gender                               1.066610  1        1.032768
age                                  1.066420  1        1.032676
education                            1.086300  2        1.020910
condition:rem_mem_pos_mood_cs_lag 2728.004160  2        7.227053
Follow-up
emmeans::emtrends(h1_pos_remote_models[[4]], pairwise ~ condition, var='rem_mem_pos_mood_cs_lag' )
$emtrends
 condition rem_mem_pos_mood_cs_lag.trend     SE  df asymp.LCL asymp.UCL
 control                           0.307 0.1167 Inf     0.078     0.535
 remitted                          0.444 0.0789 Inf     0.289     0.599
 depressed                         0.439 0.0951 Inf     0.252     0.625

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast             estimate    SE  df z.ratio p.value
 control - remitted   -0.13730 0.141 Inf  -0.975  0.5929
 control - depressed  -0.13206 0.151 Inf  -0.877  0.6548
 remitted - depressed  0.00523 0.124 Inf   0.042  0.9990

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 

m1_pos_rem_control <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                     data=df_medal_clean_control,
                    control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_pos_rem_remitted <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                     data=df_medal_clean_remitted,
                     control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))
boundary (singular) fit: see help('isSingular')
m1_pos_rem_depressed <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                       data=df_medal_clean_depressed,
                       control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))
boundary (singular) fit: see help('isSingular')
asis_output(tab_model(m1_pos_rem_control, m1_pos_rem_remitted, m1_pos_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.32 0.87 3.62 – 7.03 <0.001 4.96 0.70 3.58 – 6.33 <0.001 4.55 0.78 3.01 – 6.09 <0.001
rem mem pos mood cs lag 0.33 0.13 0.08 – 0.59 0.011 0.43 0.07 0.29 – 0.57 <0.001 0.45 0.07 0.31 – 0.58 <0.001
gender [1] -0.08 0.17 -0.41 – 0.24 0.613 -0.08 0.17 -0.42 – 0.26 0.645 -0.15 0.20 -0.55 – 0.25 0.469
age 0.00 0.01 -0.01 – 0.01 0.801 0.00 0.01 -0.01 – 0.01 0.908 0.00 0.01 -0.01 – 0.02 0.763
education [2] 0.04 0.17 -0.29 – 0.37 0.792 0.00 0.40 -0.78 – 0.78 0.994 0.20 0.50 -0.79 – 1.18 0.697
education [1] -0.16 0.42 -0.99 – 0.67 0.704 0.26 0.52 -0.76 – 1.28 0.614
Random Effects
σ2 0.73 1.56 1.52
τ00 13.15 subjectcode 3.36 subjectcode 0.90 subjectcode
τ11 0.39 subjectcode.rem_mem_pos_mood_cs_lag 0.08 subjectcode.rem_mem_pos_mood_cs_lag 0.02 subjectcode.rem_mem_pos_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
ICC 0.37    
N 53 subjectcode 85 subjectcode 46 subjectcode
Observations 225 358 191
Marginal R2 / Conditional R2 0.080 / 0.419 0.148 / NA 0.203 / NA
AIC 657.151 1232.815 658.982
NA

Negative Affect

We next run the negative affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects

Recent

# Model equation
h1_neg_recent_eq = "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag  + gender + age + education + (1 + rec_mem_neg_mood_cs_lag | subjectcode)"
# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_neg_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'), .errorhandling = 'remove') %dopar% {
  fit_all_mods(h1_neg_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_neg_recent_models,  dv.labels = names(h1_neg_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.287 0.436 3.431 – 5.142 <0.001 1.548 0.069 1.414 – 1.683 <0.001 0.202 0.010 0.182 – 0.222 <0.001 4.198 0.416 3.383 – 5.013 <0.001 1.530 0.066 1.401 – 1.659 <0.001
condition [remitted] -0.836 0.471 -1.759 – 0.088 0.076 -0.137 0.074 -0.283 – 0.008 0.065 0.021 0.011 -0.000 – 0.043 0.051 -0.769 0.440 -1.632 – 0.095 0.081 -0.134 0.070 -0.271 – 0.003 0.056
condition [depressed] -1.319 0.513 -2.326 – -0.313 0.010 -0.233 0.082 -0.393 – -0.072 0.005 0.037 0.012 0.013 – 0.061 0.003 -1.234 0.475 -2.164 – -0.304 0.009 -0.226 0.076 -0.376 – -0.077 0.003
rec mem neg mood cs lag 0.321 0.061 0.202 – 0.440 <0.001 0.048 0.009 0.030 – 0.067 <0.001 -0.007 0.001 -0.010 – -0.005 <0.001 0.339 0.057 0.227 – 0.450 <0.001 0.052 0.009 0.035 – 0.070 <0.001
gender [1] 0.004 0.048 -0.091 – 0.098 0.940 -0.002 0.008 -0.017 – 0.013 0.774 0.001 0.001 -0.002 – 0.003 0.623 0.004 0.049 -0.092 – 0.100 0.941 -0.002 0.008 -0.018 – 0.013 0.761
age -0.003 0.002 -0.007 – 0.000 0.081 -0.001 0.000 -0.001 – -0.000 0.021 0.000 0.000 0.000 – 0.000 0.009 -0.003 0.002 -0.007 – 0.000 0.068 -0.001 0.000 -0.001 – -0.000 0.012
education [1] -0.005 0.132 -0.264 – 0.255 0.972 -0.001 0.021 -0.042 – 0.041 0.973 0.000 0.003 -0.007 – 0.007 0.966 -0.018 0.136 -0.285 – 0.248 0.892 -0.004 0.022 -0.048 – 0.039 0.850
education [2] -0.058 0.127 -0.306 – 0.191 0.648 -0.010 0.020 -0.050 – 0.030 0.617 0.002 0.003 -0.005 – 0.008 0.610 -0.072 0.130 -0.328 – 0.183 0.578 -0.015 0.021 -0.056 – 0.027 0.494
condition [remitted] *
rec mem neg mood cs lag
0.138 0.071 -0.002 – 0.278 0.053 0.022 0.011 0.000 – 0.044 0.045 -0.003 0.002 -0.007 – -0.000 0.035 0.126 0.067 -0.005 – 0.257 0.059 0.022 0.010 0.001 – 0.042 0.038
condition [depressed] *
rec mem neg mood cs lag
0.205 0.077 0.053 – 0.357 0.008 0.035 0.012 0.011 – 0.059 0.004 -0.005 0.002 -0.009 – -0.002 0.002 0.191 0.072 0.050 – 0.332 0.008 0.034 0.011 0.012 – 0.056 0.003
Random Effects
σ2 1.22 1.21 1.23 0.03 0.03
τ00 2.54 subjectcode 0.07 subjectcode 0.00 subjectcode 1.94 subjectcode 0.05 subjectcode
τ11 0.06 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.04 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
ICC       0.73 0.07
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3081 3081 3081 3081 3081
Marginal R2 / Conditional R2 0.240 / NA 0.008 / NA 0.000 / NA 0.777 / 0.939 0.233 / 0.284
AIC 9562.290 9531.073 9556.128 9284.284 9331.722
NA
summary(h1_neg_recent_models[[4]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: Gamma  ( identity )
Formula: neg_mood_cs ~ condition * rec_mem_neg_mood_cs_lag + gender +      age + education + (1 + rec_mem_neg_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9284.3   9368.7  -4628.1   9256.3     3067 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.9985 -0.5314 -0.0718  0.4142  5.9328 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr 
 subjectcode (Intercept)             1.93657  1.3916        
             rec_mem_neg_mood_cs_lag 0.04328  0.2080   -1.00
 Residual                            0.03139  0.1772        
Number of obs: 3081, groups:  subjectcode, 185

Fixed effects:
                                            Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 4.198312   0.415649  10.101  < 2e-16 ***
conditionremitted                          -0.768523   0.440474  -1.745  0.08103 .  
conditiondepressed                         -1.233942   0.474511  -2.600  0.00931 ** 
rec_mem_neg_mood_cs_lag                     0.338924   0.056875   5.959 2.54e-09 ***
gender1                                     0.003598   0.048923   0.074  0.94137    
age                                        -0.003486   0.001908  -1.827  0.06776 .  
education1                                 -0.018395   0.135883  -0.135  0.89232    
education2                                 -0.072445   0.130312  -0.556  0.57826    
conditionremitted:rec_mem_neg_mood_cs_lag   0.126163   0.066874   1.887  0.05922 .  
conditiondepressed:rec_mem_neg_mood_cs_lag  0.190654   0.071978   2.649  0.00808 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.771                                                              
cndtndprssd  -0.725  0.673                                                       
rc_mm_ng___  -0.901  0.847  0.786                                                
gender1      -0.145  0.007  0.012  0.012                                         
age          -0.279  0.002  0.025  0.008  0.136                                  
education1   -0.316  0.009  0.015  0.002  0.077  0.035                           
education2   -0.331  0.005  0.018  0.000  0.018  0.074  0.926                    
cndtnr:_____  0.763 -0.994 -0.668 -0.850 -0.006 -0.003  0.003  0.004             
cndtnd:_____  0.713 -0.669 -0.993 -0.790 -0.005 -0.009 -0.006 -0.005  0.672      
plot_diagnostics(h1_neg_recent_models)

car::vif(h1_neg_recent_models[[4]])
                                         GVIF Df GVIF^(1/(2*Df))
condition                         4617.443293  2        8.243286
rec_mem_neg_mood_cs_lag              5.278240  1        2.297442
gender                               1.056653  1        1.027936
age                                  1.058243  1        1.028709
education                            1.077246  2        1.018776
condition:rec_mem_neg_mood_cs_lag 4797.212646  2        8.322374

Follow-up

emmeans::emtrends(h1_neg_recent_models[[4]], pairwise ~ condition, var='rec_mem_neg_mood_cs_lag' )
$emtrends
 condition rec_mem_neg_mood_cs_lag.trend     SE  df asymp.LCL asymp.UCL
 control                           0.339 0.0569 Inf     0.227     0.450
 remitted                          0.465 0.0352 Inf     0.396     0.534
 depressed                         0.530 0.0441 Inf     0.443     0.616

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast             estimate     SE  df z.ratio p.value
 control - remitted    -0.1262 0.0669 Inf  -1.887  0.1425
 control - depressed   -0.1907 0.0720 Inf  -2.649  0.0220
 remitted - depressed  -0.0645 0.0564 Inf  -1.143  0.4877

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 
emmeans::emmeans(h1_neg_recent_models[[4]], identity ~ rec_mem_neg_mood_cs_lag | condition )
$emmeans
condition = control:
 rec_mem_neg_mood_cs_lag emmean     SE  df asymp.LCL asymp.UCL
                    6.59   6.23 0.0600 Inf      6.11      6.34

condition = remitted:
 rec_mem_neg_mood_cs_lag emmean     SE  df asymp.LCL asymp.UCL
                    6.59   6.29 0.0524 Inf      6.19      6.39

condition = depressed:
 rec_mem_neg_mood_cs_lag emmean     SE  df asymp.LCL asymp.UCL
                    6.59   6.25 0.0571 Inf      6.14      6.36

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
condition = control:
 contrast                                estimate     SE  df z.ratio p.value
 rec_mem_neg_mood_cs_lag6.58993603267337     6.23 0.0600 Inf 103.680  <.0001

condition = remitted:
 contrast                                estimate     SE  df z.ratio p.value
 rec_mem_neg_mood_cs_lag6.58993603267337     6.29 0.0524 Inf 120.021  <.0001

condition = depressed:
 contrast                                estimate     SE  df z.ratio p.value
 rec_mem_neg_mood_cs_lag6.58993603267337     6.25 0.0571 Inf 109.361  <.0001

Results are averaged over the levels of: gender, education 

m1_neg_rec_control <- glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rec_remitted <-glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rec_depressed <-glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_neg_rec_control, m1_neg_rec_remitted, m1_neg_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 3.89 0.27 3.36 – 4.43 <0.001 3.49 0.32 2.87 – 4.11 <0.001 3.19 0.38 2.44 – 3.94 <0.001
rec mem neg mood cs lag 0.37 0.03 0.31 – 0.42 <0.001 0.42 0.03 0.37 – 0.47 <0.001 0.52 0.03 0.46 – 0.57 <0.001
gender [1] -0.05 0.07 -0.20 – 0.09 0.459 0.01 0.08 -0.15 – 0.18 0.876 -0.05 0.10 -0.25 – 0.15 0.605
age -0.00 0.00 -0.01 – 0.00 0.632 -0.00 0.00 -0.01 – 0.00 0.590 -0.00 0.00 -0.01 – 0.01 0.815
education [2] 0.01 0.07 -0.13 – 0.15 0.905 0.09 0.18 -0.26 – 0.44 0.615 -0.34 0.24 -0.81 – 0.13 0.158
education [1] 0.08 0.19 -0.30 – 0.46 0.677 -0.16 0.25 -0.65 – 0.33 0.529
Random Effects
σ2 0.02 0.04 0.04
τ00      
τ00      
τ11 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag
ρ01      
ρ01      
ICC   0.07  
N 53 subjectcode 86 subjectcode 46 subjectcode
Observations 868 1450 763
Marginal R2 / Conditional R2 0.902 / NA 0.886 / 0.895 0.937 / NA
AIC 2330.748 4659.061 2505.025

Remote

# Model equation
h1_neg_remote_eq = "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag  + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_neg_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_neg_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_neg_remote_models,   dv.labels = names(h1_neg_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.062 0.564 2.954 – 5.170 <0.001 1.504 0.089 1.330 – 1.678 <0.001 0.208 0.014 0.181 – 0.235 <0.001 3.945 0.575 2.816 – 5.075 <0.001 1.479 0.095 1.291 – 1.666 <0.001
condition [remitted] 0.211 0.499 -0.768 – 1.190 0.672 0.030 0.077 -0.122 – 0.181 0.700 -0.004 0.012 -0.027 – 0.019 0.737 0.189 0.483 -0.759 – 1.137 0.695 0.017 0.080 -0.140 – 0.173 0.836
condition [depressed] 0.337 0.564 -0.770 – 1.443 0.551 0.038 0.089 -0.138 – 0.214 0.670 -0.003 0.014 -0.031 – 0.024 0.822 0.365 0.536 -0.688 – 1.418 0.497 0.035 0.090 -0.142 – 0.212 0.697
rem mem neg mood cs lag 0.367 0.075 0.220 – 0.515 <0.001 0.055 0.011 0.034 – 0.077 <0.001 -0.008 0.002 -0.011 – -0.005 <0.001 0.379 0.075 0.232 – 0.526 <0.001 0.058 0.012 0.034 – 0.081 <0.001
gender [1] 0.068 0.105 -0.137 – 0.274 0.514 0.011 0.017 -0.022 – 0.044 0.508 -0.002 0.003 -0.007 – 0.003 0.494 0.035 0.114 -0.189 – 0.260 0.759 0.007 0.019 -0.030 – 0.043 0.729
age -0.000 0.004 -0.008 – 0.008 0.904 -0.000 0.001 -0.001 – 0.001 0.810 0.000 0.000 -0.000 – 0.000 0.713 0.001 0.004 -0.008 – 0.009 0.863 0.000 0.001 -0.001 – 0.002 0.883
education [1] 0.305 0.310 -0.304 – 0.913 0.326 0.047 0.050 -0.052 – 0.146 0.355 -0.007 0.008 -0.023 – 0.009 0.384 0.299 0.331 -0.351 – 0.949 0.366 0.046 0.055 -0.062 – 0.153 0.405
education [2] 0.110 0.299 -0.477 – 0.697 0.713 0.015 0.049 -0.081 – 0.111 0.752 -0.002 0.008 -0.018 – 0.014 0.786 0.131 0.318 -0.495 – 0.756 0.682 0.018 0.053 -0.085 – 0.122 0.728
condition [remitted] *
rem mem neg mood cs lag
-0.045 0.092 -0.226 – 0.137 0.629 -0.006 0.014 -0.034 – 0.021 0.651 0.001 0.002 -0.003 – 0.005 0.682 -0.045 0.092 -0.226 – 0.137 0.629 -0.004 0.015 -0.034 – 0.025 0.772
condition [depressed] *
rem mem neg mood cs lag
-0.056 0.104 -0.260 – 0.148 0.592 -0.006 0.016 -0.038 – 0.025 0.707 0.000 0.002 -0.004 – 0.005 0.861 -0.063 0.102 -0.264 – 0.138 0.540 -0.005 0.017 -0.038 – 0.027 0.744
Random Effects
σ2 1.45 1.44 1.44 0.04 0.04
τ00          
τ00          
τ11 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag
ρ01          
ρ01          
ICC 0.00     0.72 0.07
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 773 773 773 773 773
Marginal R2 / Conditional R2 0.097 / 0.098 0.003 / NA 0.000 / NA 0.559 / 0.877 0.097 / 0.157
AIC 2531.522 2499.854 2502.440 2501.461 2500.895
NA
summary(h1_neg_remote_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: neg_mood_cs ~ condition * rem_mem_neg_mood_cs_lag + gender +      age + education + (0 + rem_mem_neg_mood_cs_lag | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2499.9   2555.7  -1237.9   2475.9      761 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.7669 -0.5588 -0.0890  0.4442  3.5675 

Random effects:
 Groups      Name                    Variance Std.Dev.
 subjectcode rem_mem_neg_mood_cs_lag 0.000    0.000   
 Residual                            1.437    1.199   
Number of obs: 773, groups:  subjectcode, 184

Fixed effects:
                                             Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                 1.5040613  0.0888436  16.929  < 2e-16 ***
conditionremitted                           0.0297090  0.0770269   0.386    0.700    
conditiondepressed                          0.0380952  0.0894826   0.426    0.670    
rem_mem_neg_mood_cs_lag                     0.0554120  0.0111490   4.970 6.69e-07 ***
gender1                                     0.0111608  0.0168517   0.662    0.508    
age                                        -0.0001571  0.0006535  -0.240    0.810    
education1                                  0.0466883  0.0504789   0.925    0.355    
education2                                  0.0154500  0.0488786   0.316    0.752    
conditionremitted:rem_mem_neg_mood_cs_lag  -0.0062889  0.0138796  -0.453    0.650    
conditiondepressed:rem_mem_neg_mood_cs_lag -0.0060319  0.0160570  -0.376    0.707    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
             (Intr) cndtnr cndtnd r_____ gendr1 age    edctn1 edctn2 cndtnr:_____
condtnrmttd  -0.572                                                              
cndtndprssd  -0.505  0.552                                                       
rm_mm_ng___  -0.681  0.783  0.674                                                
gender1      -0.223 -0.009  0.018 -0.009                                         
age          -0.439  0.011  0.046  0.010  0.138                                  
education1   -0.548  0.019  0.006 -0.007  0.098  0.025                           
education2   -0.578  0.026  0.015  0.002  0.040  0.062  0.940                    
cndtnr:_____  0.551 -0.977 -0.542 -0.804  0.012 -0.009  0.000 -0.011             
cndtnd:_____  0.470 -0.543 -0.976 -0.694 -0.002 -0.011  0.010  0.009  0.557      
plot_diagnostics(h1_neg_remote_models)

car::vif(h1_neg_remote_models[[2]])
                                        GVIF Df GVIF^(1/(2*Df))
condition                         471.492059  2        4.659814
rem_mem_neg_mood_cs_lag             3.754178  1        1.937570
gender                              1.065883  1        1.032416
age                                 1.066221  1        1.032580
education                           1.087516  2        1.021195
condition:rem_mem_neg_mood_cs_lag 534.353082  2        4.807918

Follow-up

emmeans::emtrends(h1_neg_remote_models[[2]], pairwise ~ condition, var='rem_mem_neg_mood_cs_lag' )
$emtrends
 condition rem_mem_neg_mood_cs_lag.trend      SE  df asymp.LCL asymp.UCL
 control                          0.0554 0.01115 Inf    0.0336    0.0773
 remitted                         0.0491 0.00826 Inf    0.0329    0.0653
 depressed                        0.0494 0.01156 Inf    0.0267    0.0720

Results are averaged over the levels of: gender, education 
Confidence level used: 0.95 

$contrasts
 contrast              estimate     SE  df z.ratio p.value
 control - remitted    0.006289 0.0139 Inf   0.453  0.8930
 control - depressed   0.006032 0.0161 Inf   0.376  0.9252
 remitted - depressed -0.000257 0.0142 Inf  -0.018  0.9998

Results are averaged over the levels of: gender, education 
P value adjustment: tukey method for comparing a family of 3 estimates 

m1_neg_rem_control <-  glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rem_remitted <- glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode),
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rem_depressed <-  glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode),
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_neg_rem_control, m1_neg_rem_remitted, m1_neg_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.30 0.49 3.34 – 5.26 <0.001 4.26 0.71 2.85 – 5.66 <0.001 3.90 0.82 2.28 – 5.52 <0.001
rem mem neg mood cs lag 0.37 0.06 0.26 – 0.48 <0.001 0.33 0.06 0.22 – 0.45 <0.001 0.32 0.08 0.17 – 0.47 <0.001
gender [1] 0.04 0.15 -0.27 – 0.34 0.815 0.10 0.20 -0.29 – 0.49 0.624 0.07 0.22 -0.37 – 0.50 0.769
age -0.00 0.01 -0.01 – 0.01 0.825 0.00 0.01 -0.01 – 0.01 0.887 0.00 0.01 -0.01 – 0.02 0.718
education [2] -0.06 0.16 -0.37 – 0.25 0.692 -0.10 0.46 -1.00 – 0.80 0.828 0.48 0.52 -0.54 – 1.50 0.355
education [1] 0.46 0.50 -0.52 – 1.44 0.354 0.34 0.53 -0.72 – 1.39 0.530
Random Effects
σ2 0.02 0.04 0.04
τ00      
τ00      
τ11 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag
ρ01      
ρ01      
ICC 0.67 0.74 0.62
N 53 subjectcode 85 subjectcode 46 subjectcode
Observations 225 357 191
Marginal R2 / Conditional R2 0.726 / 0.911 0.546 / 0.881 0.592 / 0.844
AIC 612.907 1211.096 661.114

6 Hypothesis 2: Current Affect Moderation

For H2, we want to see whether current affective states moderate the ability to recall previous affective states. That is, do current negative affect feelings moderate the relationship between affect and recall?

#create separate dataframes for each group
df_medal_clean_control = df_medal_clean %>% filter(condition=='control')
df_medal_clean_remitted = df_medal_clean %>% filter(condition=='remitted')
df_medal_clean_depressed = df_medal_clean %>% filter(condition=='depressed')

#create a new dataframe for the remote variables, where the participants with the singular datapoints are removed 
df_medal_clean_remote = df_medal_clean %>% filter (!(subjectcode == 822 | subjectcode == 906 | subjectcode == 717))

#create separate dataframes per condition 
df_medal_clean_remote_control = df_medal_clean_remote %>% filter(condition=='control')
df_medal_clean_remote_remitted = df_medal_clean_remote %>% filter(condition=='remitted')
df_medal_clean_remote_depressed = df_medal_clean_remote %>% filter(condition=='depressed')

Positive Affect

Recent

An lmer model was fitted with gaussian identity, log & inverse as well as Gamma log, inverse, and identity. The models, however, were not a good fit and the gaussian identity model coped with singularity. Hence, a normal linear regression model was fitted.

h2_pos_recent_eq = "pos_mood_cs ~ condition*rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_pos_mood_cs_lag + pos_mood_cs_lag_rec|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_pos_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_pos_recent_models,  dv.labels = names(h2_pos_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.153 1.038 3.117 – 7.188 <0.001 1.535 0.153 1.236 – 1.835 <0.001 0.226 0.023 0.180 – 0.271 <0.001 4.398 1.054 2.331 – 6.464 <0.001 1.433 0.140 1.159 – 1.708 <0.001
condition [remitted] -1.253 1.209 -3.624 – 1.117 0.300 -0.130 0.180 -0.482 – 0.223 0.471 0.007 0.027 -0.047 – 0.060 0.807 -0.390 1.195 -2.733 – 1.953 0.744 -0.038 0.162 -0.356 – 0.279 0.813
condition [depressed] -3.786 1.341 -6.416 – -1.156 0.005 -0.568 0.200 -0.960 – -0.176 0.005 0.071 0.030 0.012 – 0.129 0.018 -2.707 1.313 -5.281 – -0.133 0.039 -0.502 0.179 -0.853 – -0.150 0.005
rec mem pos mood cs lag 0.081 0.144 -0.202 – 0.364 0.577 0.035 0.021 -0.006 – 0.075 0.094 -0.009 0.003 -0.015 – -0.002 0.006 0.159 0.149 -0.133 – 0.451 0.287 0.043 0.020 0.003 – 0.082 0.033
pos mood cs lag rec -0.038 0.142 -0.316 – 0.239 0.786 0.017 0.020 -0.023 – 0.057 0.414 -0.006 0.003 -0.012 – 0.000 0.052 0.063 0.146 -0.223 – 0.350 0.666 0.028 0.020 -0.011 – 0.067 0.154
gender [1] 0.016 0.047 -0.076 – 0.107 0.735 0.002 0.006 -0.011 – 0.014 0.801 -0.000 0.001 -0.002 – 0.002 0.869 0.012 0.049 -0.085 – 0.108 0.810 0.001 0.007 -0.013 – 0.014 0.923
age 0.000 0.002 -0.003 – 0.004 0.956 -0.000 0.000 -0.001 – 0.000 0.837 0.000 0.000 -0.000 – 0.000 0.717 0.001 0.002 -0.003 – 0.004 0.749 0.000 0.000 -0.001 – 0.001 0.973
education [1] -0.015 0.128 -0.266 – 0.237 0.910 -0.004 0.018 -0.038 – 0.031 0.841 0.001 0.002 -0.004 – 0.005 0.814 -0.040 0.135 -0.304 – 0.224 0.766 -0.007 0.019 -0.043 – 0.030 0.716
education [2] 0.021 0.123 -0.220 – 0.262 0.865 0.001 0.017 -0.032 – 0.034 0.941 -0.000 0.002 -0.005 – 0.004 0.987 0.007 0.129 -0.246 – 0.261 0.956 -0.000 0.018 -0.035 – 0.035 0.981
condition [remitted] *
rec mem pos mood cs lag
0.220 0.172 -0.116 – 0.557 0.200 0.024 0.025 -0.025 – 0.072 0.341 -0.002 0.004 -0.009 – 0.006 0.655 0.121 0.174 -0.220 – 0.461 0.487 0.012 0.024 -0.034 – 0.059 0.600
condition [depressed] *
rec mem pos mood cs lag
0.564 0.192 0.186 – 0.941 0.003 0.082 0.028 0.027 – 0.136 0.003 -0.010 0.004 -0.018 – -0.002 0.014 0.442 0.193 0.063 – 0.822 0.022 0.076 0.027 0.024 – 0.128 0.004
condition [remitted] *
pos mood cs lag rec
0.073 0.167 -0.255 – 0.401 0.663 0.004 0.024 -0.043 – 0.052 0.857 0.001 0.004 -0.006 – 0.008 0.838 -0.061 0.168 -0.390 – 0.268 0.718 -0.007 0.023 -0.053 – 0.038 0.749
condition [depressed] *
pos mood cs lag rec
0.403 0.184 0.042 – 0.764 0.029 0.062 0.027 0.009 – 0.114 0.021 -0.008 0.004 -0.015 – 0.000 0.052 0.242 0.184 -0.118 – 0.602 0.188 0.053 0.026 0.003 – 0.103 0.039
rec mem pos mood cs lag *
pos mood cs lag rec
0.036 0.019 -0.001 – 0.074 0.054 0.002 0.003 -0.003 – 0.007 0.502 0.000 0.000 -0.001 – 0.001 0.496 0.026 0.020 -0.013 – 0.065 0.196 0.001 0.003 -0.004 – 0.006 0.704
(condition [remitted]
rec mem pos mood cs lag)
pos mood cs lag rec
-0.016 0.022 -0.060 – 0.028 0.474 -0.001 0.003 -0.008 – 0.005 0.670 -0.000 0.000 -0.001 – 0.001 0.996 -0.000 0.023 -0.046 – 0.045 0.989 0.000 0.003 -0.006 – 0.006 0.991
(condition [depressed]
rec mem pos mood cs lag)
pos mood cs lag rec
-0.061 0.025 -0.110 – -0.013 0.014 -0.009 0.003 -0.016 – -0.002 0.010 0.001 0.001 0.000 – 0.002 0.033 -0.043 0.025 -0.093 – 0.007 0.092 -0.008 0.003 -0.015 – -0.001 0.018
Random Effects
σ2 1.11 1.11 1.13 0.02 0.02
τ00 2.29 subjectcode 0.05 subjectcode 0.00 subjectcode 2.44 subjectcode 0.00 subjectcode
τ11 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag 0.04 subjectcode.rec_mem_pos_mood_cs_lag 0.00 subjectcode.rec_mem_pos_mood_cs_lag
0.04 subjectcode.pos_mood_cs_lag_rec 0.00 subjectcode.pos_mood_cs_lag_rec 0.00 subjectcode.pos_mood_cs_lag_rec 0.04 subjectcode.pos_mood_cs_lag_rec 0.00 subjectcode.pos_mood_cs_lag_rec
ρ01 -0.52 -0.57 -0.61 -0.54 1.00
-0.53 -0.53 -0.53 -0.53 -1.00
ICC       0.84  
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3083 3083 3083 3083 3083
Marginal R2 / Conditional R2 0.304 / NA 0.008 / NA 0.000 / NA 0.788 / 0.966 0.307 / NA
AIC 9387.969 9326.329 9347.960 9550.516 9729.705
NA
NA

Due to singularity this was determined to be the best fit for the moddel

summary(h2_pos_recent_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ condition * rec_mem_pos_mood_cs_lag * pos_mood_cs_lag_rec +      gender + age + education + (1 + rec_mem_pos_mood_cs_lag +      pos_mood_cs_lag_rec | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9326.3   9465.1  -4640.2   9280.3     3060 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-6.2529 -0.5509  0.0777  0.5755  5.6576 

Random effects:
 Groups      Name                    Variance  Std.Dev. Corr       
 subjectcode (Intercept)             0.0484874 0.22020             
             rec_mem_pos_mood_cs_lag 0.0007735 0.02781  -0.57      
             pos_mood_cs_lag_rec     0.0006744 0.02597  -0.53 -0.39
 Residual                            1.1106021 1.05385             
Number of obs: 3083, groups:  subjectcode, 185

Fixed effects:
                                                                 Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                                     1.535e+00  1.528e-01  10.049  < 2e-16 ***
conditionremitted                                              -1.296e-01  1.797e-01  -0.721  0.47075    
conditiondepressed                                             -5.680e-01  2.001e-01  -2.839  0.00453 ** 
rec_mem_pos_mood_cs_lag                                         3.479e-02  2.076e-02   1.676  0.09370 .  
pos_mood_cs_lag_rec                                             1.662e-02  2.034e-02   0.817  0.41391    
gender1                                                         1.603e-03  6.363e-03   0.252  0.80106    
age                                                            -5.058e-05  2.465e-04  -0.205  0.83740    
education1                                                     -3.512e-03  1.756e-02  -0.200  0.84146    
education2                                                      1.255e-03  1.685e-02   0.074  0.94065    
conditionremitted:rec_mem_pos_mood_cs_lag                       2.357e-02  2.476e-02   0.952  0.34120    
conditiondepressed:rec_mem_pos_mood_cs_lag                      8.163e-02  2.773e-02   2.944  0.00324 ** 
conditionremitted:pos_mood_cs_lag_rec                           4.374e-03  2.425e-02   0.180  0.85685    
conditiondepressed:pos_mood_cs_lag_rec                          6.151e-02  2.669e-02   2.305  0.02116 *  
rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec                     1.790e-03  2.667e-03   0.671  0.50195    
conditionremitted:rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec  -1.352e-03  3.174e-03  -0.426  0.67007    
conditiondepressed:rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec -8.976e-03  3.499e-03  -2.565  0.01032 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
plot_diagnostics(h2_pos_recent_models)

car::vif(h2_pos_recent_models[[2]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             5.368158e+05  2       27.068010
rec_mem_pos_mood_cs_lag                               4.563112e+01  1        6.755081
pos_mood_cs_lag_rec                                   4.789222e+01  1        6.920421
gender                                                1.064290e+00  1        1.031644
age                                                   1.061207e+00  1        1.030149
education                                             1.090477e+00  2        1.021890
condition:rec_mem_pos_mood_cs_lag                     5.715056e+05  2       27.495088
condition:pos_mood_cs_lag_rec                         5.985419e+05  2       27.814653
rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec           1.209767e+02  1       10.998939
condition:rec_mem_pos_mood_cs_lag:pos_mood_cs_lag_rec 5.607270e+05  2       27.364522

Follow-up

m2_pos_rec_control <- lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                   data=df_medal_clean_control)

m2_pos_rec_remitted <- lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                   data=df_medal_clean_remitted)

m2_pos_rec_depressed <-  lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                   data=df_medal_clean_depressed)

asis_output(tab_model(m2_pos_rec_control, m2_pos_rec_remitted, m2_pos_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr)
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 4.36 0.88 2.64 – 6.09 <0.001 3.61 0.67 2.30 – 4.92 <0.001 0.71 0.95 -1.16 – 2.58 0.458
rec mem pos mood cs lag 0.16 0.12 -0.07 – 0.40 0.177 0.36 0.09 0.19 – 0.53 <0.001 0.73 0.13 0.47 – 0.99 <0.001
pos mood cs lag rec 0.05 0.12 -0.18 – 0.29 0.651 0.07 0.09 -0.10 – 0.24 0.424 0.44 0.12 0.20 – 0.68 <0.001
gender [1] -0.07 0.08 -0.23 – 0.09 0.398 0.01 0.08 -0.14 – 0.16 0.870 0.14 0.10 -0.05 – 0.34 0.148
age 0.00 0.00 -0.00 – 0.01 0.467 0.00 0.00 -0.00 – 0.01 0.748 -0.00 0.00 -0.01 – 0.01 0.481
education [2] 0.07 0.08 -0.09 – 0.23 0.383 -0.03 0.16 -0.34 – 0.29 0.864 0.11 0.23 -0.34 – 0.57 0.624
rec mem pos mood cs lag *
pos mood cs lag rec
0.03 0.02 -0.01 – 0.06 0.118 0.01 0.01 -0.01 – 0.04 0.257 -0.03 0.02 -0.07 – -0.00 0.045
education [1] -0.03 0.17 -0.37 – 0.31 0.859 0.08 0.24 -0.39 – 0.56 0.730
Observations 869 1451 763
R2 / R2 adjusted 0.247 / 0.242 0.288 / 0.285 0.328 / 0.321
AIC 2428.646 4435.026 2483.576

Remote

An lmer model was fitted with gaussian identity, log & inverse as well as Gamma log, inverse, and identity. The models, however, were not a good fit and the gaussian identity model coped with singularity. Hence, a normal linear regression model was fitted.

h2_pos_remote_eq = "pos_mood_cs ~ condition*rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem + gender + age + education + (1 | subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_pos_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_pos_remote_models,  dv.labels = names(h2_pos_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 0.962 2.389 -3.727 – 5.652 0.687 1.205 0.291 0.633 – 1.777 <0.001 0.223 0.034 0.155 – 0.290 <0.001 0.884 2.478 -3.980 – 5.748 0.721 1.189 0.334 0.534 – 1.844 <0.001
condition [remitted] 6.916 2.976 1.075 – 12.758 0.020 0.877 0.380 0.132 – 1.623 0.021 -0.100 0.048 -0.193 – -0.006 0.036 7.105 3.013 1.189 – 13.021 0.019 0.903 0.413 0.091 – 1.714 0.029
condition [depressed] 4.329 3.051 -1.661 – 10.318 0.156 0.531 0.391 -0.236 – 1.298 0.174 -0.056 0.050 -0.153 – 0.041 0.258 4.280 3.093 -1.793 – 10.352 0.167 0.545 0.424 -0.288 – 1.377 0.200
rem mem pos mood cs lag 1.183 0.373 0.450 – 1.915 0.002 0.144 0.042 0.061 – 0.227 0.001 -0.016 0.004 -0.025 – -0.007 <0.001 1.213 0.410 0.408 – 2.018 0.003 0.147 0.052 0.045 – 0.249 0.005
pos mood cs lag rem 0.491 0.320 -0.137 – 1.119 0.125 0.059 0.040 -0.020 – 0.137 0.142 -0.006 0.005 -0.015 – 0.004 0.250 0.433 0.329 -0.212 – 1.079 0.188 0.054 0.045 -0.034 – 0.142 0.229
gender [1] -0.101 0.106 -0.308 – 0.107 0.341 -0.013 0.014 -0.041 – 0.014 0.348 0.002 0.002 -0.002 – 0.005 0.356 -0.148 0.124 -0.392 – 0.095 0.232 -0.018 0.016 -0.050 – 0.014 0.280
age 0.001 0.004 -0.007 – 0.009 0.851 0.000 0.001 -0.001 – 0.001 0.852 -0.000 0.000 -0.000 – 0.000 0.860 0.001 0.005 -0.009 – 0.010 0.897 0.000 0.001 -0.001 – 0.001 0.943
education [1] 0.011 0.304 -0.587 – 0.608 0.972 0.002 0.041 -0.079 – 0.083 0.954 -0.000 0.006 -0.012 – 0.011 0.931 -0.069 0.350 -0.755 – 0.617 0.843 -0.005 0.047 -0.097 – 0.086 0.909
education [2] 0.068 0.293 -0.508 – 0.644 0.816 0.010 0.040 -0.068 – 0.088 0.805 -0.001 0.005 -0.012 – 0.009 0.794 0.010 0.337 -0.651 – 0.671 0.976 0.004 0.045 -0.084 – 0.093 0.922
condition [remitted] *
rem mem pos mood cs lag
-1.325 0.474 -2.255 – -0.395 0.005 -0.165 0.058 -0.278 – -0.052 0.004 0.019 0.007 0.005 – 0.032 0.007 -1.411 0.503 -2.398 – -0.424 0.005 -0.173 0.066 -0.302 – -0.043 0.009
condition [depressed] *
rem mem pos mood cs lag
-0.828 0.483 -1.777 – 0.120 0.087 -0.099 0.058 -0.214 – 0.015 0.088 0.011 0.007 -0.003 – 0.024 0.130 -0.857 0.519 -1.876 – 0.163 0.099 -0.104 0.067 -0.236 – 0.028 0.124
condition [remitted] *
pos mood cs lag rem
-0.867 0.397 -1.646 – -0.089 0.029 -0.113 0.051 -0.213 – -0.012 0.028 0.013 0.007 0.000 – 0.026 0.045 -0.820 0.399 -1.604 – -0.036 0.040 -0.109 0.055 -0.217 – -0.001 0.049
condition [depressed] *
pos mood cs lag rem
-0.565 0.406 -1.362 – 0.231 0.164 -0.071 0.053 -0.175 – 0.032 0.177 0.008 0.007 -0.006 – 0.021 0.250 -0.500 0.409 -1.304 – 0.303 0.222 -0.067 0.057 -0.178 – 0.044 0.237
rem mem pos mood cs lag *
pos mood cs lag rem
-0.097 0.050 -0.196 – 0.001 0.053 -0.011 0.006 -0.023 – 0.000 0.052 0.001 0.001 -0.000 – 0.002 0.092 -0.087 0.055 -0.195 – 0.020 0.111 -0.010 0.007 -0.024 – 0.003 0.138
(condition [remitted]
rem mem pos mood cs lag)
pos mood cs lag rem
0.170 0.063 0.046 – 0.294 0.007 0.022 0.008 0.006 – 0.037 0.006 -0.002 0.001 -0.004 – -0.001 0.009 0.169 0.066 0.038 – 0.299 0.011 0.021 0.009 0.004 – 0.039 0.015
(condition [depressed]
rem mem pos mood cs lag)
pos mood cs lag rem
0.110 0.064 -0.016 – 0.236 0.087 0.014 0.008 -0.002 – 0.029 0.087 -0.001 0.001 -0.003 – 0.000 0.125 0.104 0.069 -0.030 – 0.239 0.129 0.013 0.009 -0.005 – 0.031 0.145
Random Effects
σ2 1.48 1.45 1.45 0.03 0.03
τ00 0.00 subjectcode 0.00 subjectcode 0.00 subjectcode 0.13 subjectcode 0.00 subjectcode
ICC       0.84 0.07
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 774 774 774 774 774
Marginal R2 / Conditional R2 0.162 / NA 0.004 / NA 0.000 / NA 0.677 / 0.947 0.167 / 0.225
AIC 2579.769 2522.642 2524.618 2598.960 2608.422
NA
NA

Due to singularity a regular linear model was fitted.

summary(h2_pos_remote_models[[2]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: gaussian  ( log )
Formula: pos_mood_cs ~ condition * rem_mem_pos_mood_cs_lag * pos_mood_cs_lag_rem +      gender + age + education + (1 | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2522.6   2606.4  -1243.3   2486.6      756 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.0184 -0.5312  0.1162  0.5850  4.1139 

Random effects:
 Groups      Name        Variance Std.Dev.
 subjectcode (Intercept) 0.000    0.000   
 Residual                1.451    1.205   
Number of obs: 774, groups:  subjectcode, 184

Fixed effects:
                                                                 Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                                     1.2051866  0.2914537   4.135 3.55e-05 ***
conditionremitted                                               0.8774714  0.3795533   2.312 0.020786 *  
conditiondepressed                                              0.5310702  0.3907275   1.359 0.174089    
rem_mem_pos_mood_cs_lag                                         0.1439155  0.0421589   3.414 0.000641 ***
pos_mood_cs_lag_rem                                             0.0585855  0.0398361   1.471 0.141382    
gender1                                                        -0.0132039  0.0140553  -0.939 0.347513    
age                                                             0.0001037  0.0005555   0.187 0.851963    
education1                                                      0.0023843  0.0413052   0.058 0.953968    
education2                                                      0.0098235  0.0398240   0.247 0.805161    
conditionremitted:rem_mem_pos_mood_cs_lag                      -0.1648074  0.0575540  -2.864 0.004190 ** 
conditiondepressed:rem_mem_pos_mood_cs_lag                     -0.0994787  0.0582374  -1.708 0.087607 .  
conditionremitted:pos_mood_cs_lag_rem                          -0.1127666  0.0512992  -2.198 0.027934 *  
conditiondepressed:pos_mood_cs_lag_rem                         -0.0712884  0.0527025  -1.353 0.176166    
rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem                    -0.0114331  0.0058762  -1.946 0.051694 .  
conditionremitted:rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem   0.0215715  0.0077950   2.767 0.005651 ** 
conditiondepressed:rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem  0.0135242  0.0078953   1.713 0.086726 .  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
plot_diagnostics(h2_pos_remote_models)

car::vif(h2_pos_remote_models[[2]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             4.254349e+05  2       25.539259
rem_mem_pos_mood_cs_lag                               7.121652e+01  1        8.438988
pos_mood_cs_lag_rem                                   7.223756e+01  1        8.499268
gender                                                1.070625e+00  1        1.034710
age                                                   1.081350e+00  1        1.039880
education                                             1.100255e+00  2        1.024173
condition:rem_mem_pos_mood_cs_lag                     3.583274e+05  2       24.466396
condition:pos_mood_cs_lag_rem                         4.901003e+05  2       26.458866
rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem           1.344518e+02  1       11.595334
condition:rem_mem_pos_mood_cs_lag:pos_mood_cs_lag_rem 4.094042e+05  2       25.295197

Follow-up


m2_pos_rem_control <- lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education, 
                   data=df_medal_clean_remote_control)

m2_pos_rem_remitted <- lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education,  
                   data=df_medal_clean_remote_remitted)

m2_pos_rem_depressed <-  lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education, 
                   data=df_medal_clean_remote_depressed)

asis_output(tab_model(m2_pos_rem_control, m2_pos_rem_remitted, m2_pos_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 0.98 2.04 -3.04 – 5.01 0.630 8.06 2.00 4.12 – 12.00 <0.001 5.13 2.11 0.96 – 9.30 0.016
rem mem pos mood cs lag 1.19 0.32 0.56 – 1.82 <0.001 -0.15 0.31 -0.77 – 0.46 0.627 0.37 0.32 -0.26 – 1.00 0.245
pos mood cs lag rem 0.50 0.28 -0.05 – 1.04 0.074 -0.38 0.25 -0.88 – 0.11 0.127 -0.05 0.26 -0.56 – 0.46 0.838
gender [1] -0.10 0.17 -0.44 – 0.23 0.543 -0.11 0.18 -0.46 – 0.24 0.530 -0.16 0.20 -0.56 – 0.24 0.430
age 0.00 0.01 -0.01 – 0.01 0.948 0.00 0.01 -0.01 – 0.01 0.977 -0.00 0.01 -0.02 – 0.02 0.965
education [2] 0.03 0.17 -0.32 – 0.37 0.881 0.01 0.40 -0.78 – 0.80 0.977 0.11 0.49 -0.85 – 1.08 0.816
rem mem pos mood cs lag *
pos mood cs lag rem
-0.10 0.04 -0.18 – -0.01 0.025 0.07 0.04 -0.01 – 0.15 0.072 0.01 0.04 -0.07 – 0.09 0.798
education [1] -0.19 0.43 -1.04 – 0.65 0.652 0.18 0.51 -0.82 – 1.18 0.724
Observations 224 357 190
R2 / R2 adjusted 0.190 / 0.168 0.136 / 0.119 0.207 / 0.177
AIC 661.932 1211.438 634.332

Negative Affect

Recent

h2_neg_recent_eq = "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_neg_mood_cs_lag + neg_mood_cs_lag_rec|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_neg_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_neg_recent_models, dv.labels = names(h2_neg_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 3.005 0.794 1.447 – 4.562 <0.001 1.279 0.141 1.002 – 1.556 <0.001 0.245 0.023 0.200 – 0.290 <0.001 1.988 0.734 0.550 – 3.427 0.007 0.968 0.129 0.714 – 1.221 <0.001
condition [remitted] 0.156 0.950 -1.707 – 2.019 0.870 -0.031 0.164 -0.353 – 0.291 0.850 0.018 0.027 -0.034 – 0.070 0.493 0.879 0.892 -0.870 – 2.627 0.325 0.244 0.154 -0.058 – 0.545 0.113
condition [depressed] -0.371 1.004 -2.340 – 1.598 0.712 -0.275 0.179 -0.625 – 0.076 0.124 0.078 0.030 0.020 – 0.136 0.009 0.308 0.920 -1.495 – 2.111 0.738 0.047 0.163 -0.272 – 0.366 0.773
rec mem neg mood cs lag 0.390 0.114 0.167 – 0.613 0.001 0.068 0.020 0.029 – 0.106 0.001 -0.010 0.003 -0.017 – -0.004 0.001 0.547 0.107 0.337 – 0.756 <0.001 0.114 0.018 0.078 – 0.149 <0.001
neg mood cs lag rec 0.261 0.116 0.033 – 0.488 0.025 0.051 0.020 0.010 – 0.091 0.014 -0.008 0.003 -0.015 – -0.002 0.015 0.406 0.108 0.194 – 0.618 <0.001 0.095 0.019 0.058 – 0.131 <0.001
gender [1] -0.004 0.047 -0.097 – 0.088 0.927 -0.002 0.008 -0.017 – 0.013 0.798 0.001 0.001 -0.002 – 0.003 0.680 0.005 0.049 -0.092 – 0.101 0.925 -0.001 0.008 -0.017 – 0.015 0.909
age -0.003 0.002 -0.007 – 0.000 0.068 -0.001 0.000 -0.001 – -0.000 0.026 0.000 0.000 0.000 – 0.000 0.013 -0.004 0.002 -0.007 – 0.000 0.060 -0.001 0.000 -0.001 – -0.000 0.021
education [1] -0.027 0.131 -0.283 – 0.230 0.839 -0.005 0.021 -0.046 – 0.036 0.801 0.001 0.003 -0.005 – 0.008 0.730 -0.054 0.138 -0.325 – 0.218 0.698 -0.008 0.022 -0.052 – 0.035 0.704
education [2] -0.081 0.126 -0.328 – 0.165 0.519 -0.016 0.020 -0.055 – 0.024 0.436 0.003 0.003 -0.003 – 0.009 0.370 -0.112 0.133 -0.373 – 0.148 0.398 -0.019 0.021 -0.061 – 0.023 0.373
condition [remitted] *
rec mem neg mood cs lag
-0.094 0.141 -0.371 – 0.184 0.509 -0.005 0.024 -0.051 – 0.042 0.835 -0.001 0.004 -0.009 – 0.006 0.715 -0.216 0.137 -0.484 – 0.053 0.115 -0.046 0.023 -0.091 – -0.002 0.041
condition [depressed] *
rec mem neg mood cs lag
-0.005 0.150 -0.299 – 0.290 0.976 0.032 0.026 -0.018 – 0.082 0.212 -0.010 0.004 -0.018 – -0.002 0.016 -0.114 0.142 -0.393 – 0.164 0.421 -0.018 0.024 -0.065 – 0.029 0.453
condition [remitted] *
neg mood cs lag rec
-0.088 0.144 -0.371 – 0.196 0.544 -0.007 0.025 -0.055 – 0.042 0.790 -0.001 0.004 -0.009 – 0.007 0.820 -0.192 0.139 -0.464 – 0.080 0.166 -0.046 0.023 -0.091 – -0.000 0.049
condition [depressed] *
neg mood cs lag rec
-0.124 0.155 -0.428 – 0.180 0.425 0.011 0.027 -0.042 – 0.064 0.687 -0.007 0.004 -0.015 – 0.002 0.126 -0.218 0.146 -0.504 – 0.069 0.136 -0.034 0.025 -0.083 – 0.015 0.179
rec mem neg mood cs lag *
neg mood cs lag rec
-0.018 0.014 -0.046 – 0.010 0.202 -0.004 0.003 -0.009 – 0.001 0.103 0.001 0.000 -0.000 – 0.001 0.107 -0.039 0.013 -0.066 – -0.013 0.004 -0.010 0.002 -0.015 – -0.006 <0.001
(condition [remitted]
rec mem neg mood cs lag)
neg mood cs lag rec
0.026 0.018 -0.010 – 0.062 0.161 0.003 0.003 -0.003 – 0.009 0.379 -0.000 0.000 -0.001 – 0.001 0.825 0.043 0.018 0.007 – 0.078 0.018 0.008 0.003 0.003 – 0.014 0.004
(condition [depressed]
rec mem neg mood cs lag)
neg mood cs lag rec
0.029 0.020 -0.010 – 0.067 0.147 -0.000 0.003 -0.007 – 0.006 0.956 0.001 0.001 -0.000 – 0.002 0.168 0.043 0.019 0.006 – 0.081 0.024 0.007 0.003 0.000 – 0.013 0.038
Random Effects
σ2 1.14 1.12 1.12 0.03 0.03
τ00 2.08 subjectcode 0.05 subjectcode 0.00 subjectcode 1.83 subjectcode 0.05 subjectcode
τ11 0.06 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag 0.05 subjectcode.rec_mem_neg_mood_cs_lag 0.00 subjectcode.rec_mem_neg_mood_cs_lag
0.04 subjectcode.neg_mood_cs_lag_rec 0.00 subjectcode.neg_mood_cs_lag_rec 0.00 subjectcode.neg_mood_cs_lag_rec 0.04 subjectcode.neg_mood_cs_lag_rec 0.00 subjectcode.neg_mood_cs_lag_rec
ρ01 -0.69 -0.67 -0.63 -0.64 -0.66
-0.26 -0.26 -0.30 -0.30 -0.29
ICC       0.82 0.10
N 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode 185 subjectcode
Observations 3081 3081 3081 3081 3081
Marginal R2 / Conditional R2 0.279 / NA 0.010 / NA 0.000 / NA 0.750 / 0.954 0.269 / 0.344
AIC 9455.059 9378.342 9388.237 9085.624 9124.760
NA

Check Final Model

summary(h2_neg_recent_models[[4]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: Gamma  ( identity )
Formula: neg_mood_cs ~ condition * rec_mem_neg_mood_cs_lag * neg_mood_cs_lag_rec +      gender + age + education + (1 + rec_mem_neg_mood_cs_lag +      neg_mood_cs_lag_rec | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  9085.6   9224.4  -4519.8   9039.6     3058 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-5.1267 -0.5124 -0.0743  0.4246  6.2232 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr       
 subjectcode (Intercept)             1.82741  1.3518              
             rec_mem_neg_mood_cs_lag 0.05136  0.2266   -0.64      
             neg_mood_cs_lag_rec     0.03574  0.1891   -0.30 -0.54
 Residual                            0.02884  0.1698              
Number of obs: 3081, groups:  subjectcode, 185

Fixed effects:
                                                                Estimate Std. Error t value Pr(>|z|)    
(Intercept)                                                     1.988430   0.733785   2.710 0.006732 ** 
conditionremitted                                               0.878547   0.891970   0.985 0.324648    
conditiondepressed                                              0.308193   0.919531   0.335 0.737502    
rec_mem_neg_mood_cs_lag                                         0.546805   0.106798   5.120 3.06e-07 ***
neg_mood_cs_lag_rec                                             0.405864   0.108068   3.756 0.000173 ***
gender1                                                         0.004651   0.049313   0.094 0.924859    
age                                                            -0.003610   0.001915  -1.885 0.059444 .  
education1                                                     -0.053705   0.138463  -0.388 0.698114    
education2                                                     -0.112421   0.132991  -0.845 0.397928    
conditionremitted:rec_mem_neg_mood_cs_lag                      -0.215502   0.136810  -1.575 0.115212    
conditiondepressed:rec_mem_neg_mood_cs_lag                     -0.114499   0.142207  -0.805 0.420729    
conditionremitted:neg_mood_cs_lag_rec                          -0.192039   0.138715  -1.384 0.166231    
conditiondepressed:neg_mood_cs_lag_rec                         -0.217821   0.146133  -1.491 0.136075    
rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec                    -0.039222   0.013428  -2.921 0.003490 ** 
conditionremitted:rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec   0.042920   0.018139   2.366 0.017970 *  
conditiondepressed:rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec  0.043414   0.019219   2.259 0.023885 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
plot_diagnostics(h2_neg_recent_models) 

car::vif(h2_neg_recent_models[[4]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             81235.547366  2       16.882491
rec_mem_neg_mood_cs_lag                                  20.502493  1        4.527968
neg_mood_cs_lag_rec                                      24.730039  1        4.972931
gender                                                    1.066586  1        1.032756
age                                                       1.059303  1        1.029224
education                                                 1.083623  2        1.020280
condition:rec_mem_neg_mood_cs_lag                     93718.112392  2       17.496689
condition:neg_mood_cs_lag_rec                         85640.550025  2       17.106843
rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec              45.364501  1        6.735317
condition:rec_mem_neg_mood_cs_lag:neg_mood_cs_lag_rec 64754.318868  2       15.952075

Follow-up


m2_neg_rec_control <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + 
                              gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_remitted <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_depressed <-  glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode),  
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m2_neg_rec_control, m2_neg_rec_remitted, m2_neg_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 2.05 0.66 0.76 – 3.34 0.002 2.47 0.67 1.16 – 3.79 <0.001 3.04 0.72 1.62 – 4.46 <0.001
rec mem neg mood cs lag 0.52 0.09 0.35 – 0.69 <0.001 0.37 0.09 0.19 – 0.55 <0.001 0.38 0.10 0.19 – 0.58 <0.001
neg mood cs lag rec 0.37 0.09 0.19 – 0.54 <0.001 0.22 0.09 0.04 – 0.40 0.016 0.15 0.11 -0.06 – 0.35 0.161
gender [1] 0.00 0.07 -0.13 – 0.14 0.959 0.05 0.08 -0.12 – 0.21 0.588 -0.08 0.10 -0.27 – 0.11 0.401
age -0.00 0.00 -0.01 – 0.00 0.546 -0.00 0.00 -0.01 – 0.00 0.278 -0.00 0.00 -0.01 – 0.01 0.502
education [2] -0.02 0.07 -0.15 – 0.12 0.797 0.09 0.18 -0.26 – 0.43 0.623 -0.48 0.24 -0.96 – 0.00 0.051
rec mem neg mood cs lag *
neg mood cs lag rec
-0.04 0.01 -0.06 – -0.01 0.001 0.00 0.01 -0.03 – 0.03 0.970 0.01 0.01 -0.02 – 0.04 0.561
education [1] 0.10 0.19 -0.27 – 0.48 0.585 -0.33 0.25 -0.82 – 0.17 0.195
Random Effects
σ2 0.02 0.03 0.03
τ00 1.68 subjectcode 2.09 subjectcode 1.57 subjectcode
1.31 subjectcode.1 1.04 subjectcode.1 1.20 subjectcode.1
τ11 0.04 subjectcode.rec_mem_neg_mood_cs_lag 0.05 subjectcode.rec_mem_neg_mood_cs_lag 0.04 subjectcode.rec_mem_neg_mood_cs_lag
0.03 subjectcode.1.neg_mood_cs_lag_rec 0.02 subjectcode.1.neg_mood_cs_lag_rec 0.03 subjectcode.1.neg_mood_cs_lag_rec
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
-1.00 subjectcode.1 -1.00 subjectcode.1 -1.00 subjectcode.1
ICC   0.73 0.73
N 53 subjectcode 86 subjectcode 46 subjectcode
Observations 868 1450 763
Marginal R2 / Conditional R2 0.911 / NA 0.802 / 0.947 0.844 / 0.959
AIC 2147.701 4475.592 2409.196

Remote

h2_neg_remote_eq = "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem + gender + age + education + (1 + rem_mem_neg_mood_cs_lag + neg_mood_cs_lag_rem|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_neg_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_neg_remote_models, dv.labels = names(h2_neg_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
  gauss-link gauss-log gauss-inv Gamma-link Gamma-log
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 5.100 2.475 0.242 – 9.958 0.040 1.814 0.467 0.897 – 2.731 <0.001 0.155 0.076 0.006 – 0.303 0.041 5.049 2.363 0.410 – 9.689 0.033 2.524 0.429 1.682 – 3.367 <0.001
condition [remitted] -1.784 2.762 -7.205 – 3.638 0.519 -0.435 0.512 -1.441 – 0.570 0.396 0.074 0.083 -0.090 – 0.237 0.377 -2.630 2.664 -7.861 – 2.600 0.324 -1.139 0.480 -2.083 – -0.196 0.018
condition [depressed] -1.912 2.797 -7.402 – 3.578 0.494 -0.451 0.517 -1.466 – 0.564 0.383 0.078 0.084 -0.088 – 0.243 0.357 -0.966 2.690 -6.248 – 4.315 0.720 -1.129 0.484 -2.079 – -0.180 0.020
rem mem neg mood cs lag 0.018 0.478 -0.919 – 0.956 0.970 -0.027 0.087 -0.197 – 0.144 0.760 0.006 0.014 -0.021 – 0.033 0.679 0.023 0.455 -0.870 – 0.917 0.959 -0.158 0.081 -0.316 – 0.001 0.052
neg mood cs lag rem -0.142 0.412 -0.952 – 0.667 0.730 -0.049 0.079 -0.204 – 0.106 0.532 0.009 0.013 -0.016 – 0.034 0.496 -0.095 0.384 -0.848 – 0.659 0.805 -0.172 0.071 -0.312 – -0.033 0.016
gender [1] 0.071 0.099 -0.124 – 0.266 0.475 0.010 0.016 -0.021 – 0.041 0.533 -0.002 0.003 -0.007 – 0.003 0.542 0.033 0.110 -0.183 – 0.250 0.762 0.004 0.018 -0.031 – 0.040 0.821
age -0.000 0.004 -0.008 – 0.007 0.918 -0.000 0.001 -0.001 – 0.001 0.947 0.000 0.000 -0.000 – 0.000 0.991 -0.002 0.004 -0.010 – 0.007 0.725 -0.000 0.001 -0.002 – 0.001 0.830
education [1] 0.315 0.291 -0.257 – 0.887 0.280 0.047 0.047 -0.045 – 0.138 0.317 -0.007 0.008 -0.022 – 0.008 0.360 0.264 0.316 -0.356 – 0.885 0.403 0.045 0.052 -0.058 – 0.147 0.391
education [2] 0.127 0.281 -0.424 – 0.679 0.651 0.017 0.045 -0.071 – 0.106 0.702 -0.002 0.007 -0.017 – 0.012 0.739 0.067 0.304 -0.529 – 0.664 0.824 0.012 0.050 -0.086 – 0.111 0.804
condition [remitted] *
rem mem neg mood cs lag
0.647 0.532 -0.397 – 1.692 0.224 0.130 0.095 -0.056 – 0.317 0.171 -0.021 0.015 -0.051 – 0.008 0.157 0.772 0.514 -0.237 – 1.781 0.134 0.256 0.091 0.079 – 0.434 0.005
condition [depressed] *
rem mem neg mood cs lag
0.583 0.539 -0.476 – 1.641 0.280 0.117 0.096 -0.071 – 0.306 0.223 -0.020 0.015 -0.049 – 0.010 0.201 0.392 0.518 -0.625 – 1.409 0.450 0.232 0.091 0.054 – 0.411 0.011
condition [remitted] *
neg mood cs lag rem
0.239 0.458 -0.660 – 1.138 0.602 0.063 0.086 -0.106 – 0.232 0.467 -0.010 0.014 -0.038 – 0.017 0.458 0.341 0.432 -0.507 – 1.190 0.430 0.184 0.079 0.028 – 0.339 0.021
condition [depressed] *
neg mood cs lag rem
0.314 0.465 -0.599 – 1.227 0.500 0.074 0.087 -0.096 – 0.245 0.394 -0.013 0.014 -0.040 – 0.015 0.374 0.086 0.437 -0.773 – 0.945 0.844 0.182 0.080 0.025 – 0.339 0.023
rem mem neg mood cs lag *
neg mood cs lag rem
0.051 0.080 -0.106 – 0.208 0.522 0.013 0.015 -0.016 – 0.042 0.380 -0.002 0.002 -0.007 – 0.002 0.346 0.047 0.075 -0.100 – 0.194 0.529 0.036 0.013 0.009 – 0.062 0.008
(condition [remitted]
rem mem neg mood cs lag)
neg mood cs lag rem
-0.096 0.087 -0.267 – 0.075 0.272 -0.020 0.016 -0.051 – 0.011 0.213 0.003 0.003 -0.002 – 0.008 0.204 -0.110 0.083 -0.272 – 0.053 0.185 -0.042 0.015 -0.071 – -0.013 0.005
(condition [depressed]
rem mem neg mood cs lag)
neg mood cs lag rem
-0.094 0.089 -0.268 – 0.080 0.290 -0.019 0.016 -0.050 – 0.012 0.236 0.003 0.003 -0.002 – 0.008 0.220 -0.050 0.084 -0.215 – 0.114 0.547 -0.038 0.015 -0.067 – -0.008 0.012
Random Effects
σ2 1.19 1.13 1.13 0.03 0.03
τ00 2.73 subjectcode 0.11 subjectcode 0.00 subjectcode 5.51 subjectcode 0.17 subjectcode
τ11 0.10 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag 0.11 subjectcode.rem_mem_neg_mood_cs_lag 0.00 subjectcode.rem_mem_neg_mood_cs_lag
0.07 subjectcode.neg_mood_cs_lag_rem 0.00 subjectcode.neg_mood_cs_lag_rem 0.00 subjectcode.neg_mood_cs_lag_rem 0.09 subjectcode.neg_mood_cs_lag_rem 0.00 subjectcode.neg_mood_cs_lag_rem
ρ01 -0.48 -0.43 -0.40 -0.59 -0.56
-0.55 -0.68 -0.73 -0.67 -0.69
ICC     0.00 0.93 0.29
N 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode 184 subjectcode
Observations 773 773 773 773 773
Marginal R2 / Conditional R2 0.135 / NA 0.004 / NA 0.000 / 0.000 0.319 / 0.955 0.134 / 0.389
AIC 2517.737 2469.493 2474.506 2376.402 2379.724
NA

Check Final Model

summary(h2_neg_remote_models[[4]])
Generalized linear mixed model fit by maximum likelihood (Laplace Approximation) ['glmerMod']
 Family: Gamma  ( identity )
Formula: neg_mood_cs ~ condition * rem_mem_neg_mood_cs_lag * neg_mood_cs_lag_rem +      gender + age + education + (1 + rem_mem_neg_mood_cs_lag +      neg_mood_cs_lag_rem | subjectcode)
   Data: data
Control: glmerControl(calc.derivs = F, optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))

     AIC      BIC   logLik deviance df.resid 
  2376.4   2483.4  -1165.2   2330.4      750 

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.8828 -0.4912 -0.0808  0.4596  3.6889 

Random effects:
 Groups      Name                    Variance Std.Dev. Corr       
 subjectcode (Intercept)             5.51342  2.3481              
             rem_mem_neg_mood_cs_lag 0.11297  0.3361   -0.59      
             neg_mood_cs_lag_rem     0.09193  0.3032   -0.67 -0.19
 Residual                            0.02806  0.1675              
Number of obs: 773, groups:  subjectcode, 184

Fixed effects:
                                                                Estimate Std. Error t value Pr(>|z|)  
(Intercept)                                                     5.049496   2.363180   2.137   0.0326 *
conditionremitted                                              -2.630500   2.664145  -0.987   0.3235  
conditiondepressed                                             -0.966208   2.690305  -0.359   0.7195  
rem_mem_neg_mood_cs_lag                                         0.023389   0.455195   0.051   0.9590  
neg_mood_cs_lag_rem                                            -0.094806   0.383815  -0.247   0.8049  
gender1                                                         0.033476   0.110266   0.304   0.7614  
age                                                            -0.001509   0.004283  -0.352   0.7246  
education1                                                      0.264451   0.315903   0.837   0.4025  
education2                                                      0.067441   0.303836   0.222   0.8243  
conditionremitted:rem_mem_neg_mood_cs_lag                       0.772046   0.514111   1.502   0.1332  
conditiondepressed:rem_mem_neg_mood_cs_lag                      0.391886   0.518051   0.756   0.4494  
conditionremitted:neg_mood_cs_lag_rem                           0.341221   0.432229   0.789   0.4299  
conditiondepressed:neg_mood_cs_lag_rem                          0.086013   0.437462   0.197   0.8441  
rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem                     0.047084   0.074756   0.630   0.5288  
conditionremitted:rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem  -0.109702   0.082748  -1.326   0.1849  
conditiondepressed:rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem -0.050402   0.083655  -0.603   0.5468  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation matrix not shown by default, as p = 16 > 12.
Use print(x, correlation=TRUE)  or
    vcov(x)        if you need it
optimizer (bobyqa) convergence code: 1 (bobyqa -- maximum number of function evaluations exceeded)
plot_diagnostics(h2_neg_remote_models)

car::vif(h2_neg_remote_models[[4]])
                                                              GVIF Df GVIF^(1/(2*Df))
condition                                             1.719804e+05  2       20.364312
rem_mem_neg_mood_cs_lag                               7.754904e+01  1        8.806193
neg_mood_cs_lag_rem                                   7.174831e+01  1        8.470437
gender                                                1.066973e+00  1        1.032944
age                                                   1.090042e+00  1        1.044051
education                                             1.089394e+00  2        1.021636
condition:rem_mem_neg_mood_cs_lag                     1.852161e+05  2       20.745298
condition:neg_mood_cs_lag_rem                         1.832876e+05  2       20.691086
rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem           1.827105e+02  1       13.517044
condition:rem_mem_neg_mood_cs_lag:neg_mood_cs_lag_rem 1.977452e+05  2       21.087567

Follow-up


m2_neg_rem_control <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_remitted <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode),
                   data=df_medal_clean_remote_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_depressed <-  glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m2_neg_rem_control, m2_neg_rem_remitted, m2_neg_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
  control remitted depressed
Predictors Estimates std. Error CI p Estimates std. Error CI p Estimates std. Error CI p
(Intercept) 6.26 2.12 2.07 – 10.44 0.004 2.97 1.60 -0.17 – 6.11 0.063 2.69 1.76 -0.79 – 6.17 0.129
rem mem neg mood cs lag -0.28 0.39 -1.06 – 0.49 0.471 0.73 0.26 0.22 – 1.24 0.005 0.57 0.27 0.03 – 1.11 0.039
neg mood cs lag rem -0.33 0.34 -0.99 – 0.34 0.337 0.15 0.21 -0.26 – 0.56 0.469 0.15 0.23 -0.31 – 0.61 0.510
gender [1] -0.01 0.13 -0.27 – 0.25 0.926 0.17 0.19 -0.21 – 0.55 0.389 0.01 0.21 -0.40 – 0.42 0.963
age 0.00 0.01 -0.01 – 0.01 0.831 -0.00 0.01 -0.02 – 0.01 0.739 0.01 0.01 -0.01 – 0.02 0.515
education [2] -0.12 0.13 -0.39 – 0.14 0.364 -0.09 0.44 -0.95 – 0.77 0.841 0.43 0.48 -0.51 – 1.38 0.366
rem mem neg mood cs lag *
neg mood cs lag rem
0.11 0.06 -0.02 – 0.23 0.093 -0.05 0.04 -0.12 – 0.02 0.189 -0.03 0.04 -0.12 – 0.05 0.423
education [1] 0.53 0.47 -0.40 – 1.46 0.264 0.32 0.50 -0.66 – 1.29 0.526
Random Effects
σ2 0.01 0.04 0.03
τ00 7.80 subjectcode 2.21 subjectcode 2.86 subjectcode
7.53 subjectcode.1 2.28 subjectcode.1 4.00 subjectcode.1
τ11 0.29 subjectcode.rem_mem_neg_mood_cs_lag 0.09 subjectcode.rem_mem_neg_mood_cs_lag 0.10 subjectcode.rem_mem_neg_mood_cs_lag
0.21 subjectcode.1.neg_mood_cs_lag_rem 0.05 subjectcode.1.neg_mood_cs_lag_rem 0.09 subjectcode.1.neg_mood_cs_lag_rem
ρ01 -1.00 subjectcode -1.00 subjectcode -1.00 subjectcode
-1.00 subjectcode.1 -0.98 subjectcode.1 -1.00 subjectcode.1
ICC 0.98 0.79 0.83
N 52 subjectcode 84 subjectcode 45 subjectcode
Observations 224 356 190
Marginal R2 / Conditional R2 0.358 / 0.984 0.618 / 0.920 0.514 / 0.917
AIC 449.601 1180.764 649.358

7 Exploratory Analyses

7.1 Mediation Models

Following the tests we ran for hypothesis 1, we would also like to see whether or not mediation effects exist.

RecPA


#create a new dataframe for the remote variables, where the participants with the singular datapoints are removed 
df_medal_moderation =  df_medal_clean %>% 
  filter (!(subjectcode == 822 | subjectcode == 906 | subjectcode == 717)) %>% 
  filter(!(is.na(pos_mood) | is.na(rec_mem_pos_mood_lag))) 

# med model
med_fit_h1_pos_rec <- lme4::lmer(rec_mem_pos_mood_lag ~ condition_dummy +
                                   age + gender + education + 
                                   (1 |subjectcode),
                                 data = df_medal_moderation)
# full model
out_fit_h1_pos_rec <-lme4::lmer(pos_mood ~ condition_dummy + rec_mem_pos_mood_lag +
                                  age + gender +education +
                                  (1 + rec_mem_pos_mood_lag |subjectcode), 
                                data = df_medal_moderation)
Warning: Model failed to converge with max|grad| = 0.00497338 (tol = 0.002, component 1)
#mediation analysis for control and depressed
med_pos_rec_dep <- mediation::mediate(med_fit_h1_pos_rec, out_fit_h1_pos_rec,
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 3 ,  mediator = 'rec_mem_pos_mood_lag', 
                                      sims=10000)
summary(med_pos_rec_dep)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)             -1.533       -1.860        -1.22  <2e-16 ***
ACME (treated)             -1.533       -1.860        -1.22  <2e-16 ***
ADE (control)              -1.414       -1.686        -1.14  <2e-16 ***
ADE (treated)              -1.414       -1.686        -1.14  <2e-16 ***
Total Effect               -2.947       -3.358        -2.56  <2e-16 ***
Prop. Mediated (control)    0.519        0.447         0.59  <2e-16 ***
Prop. Mediated (treated)    0.519        0.447         0.59  <2e-16 ***
ACME (average)             -1.533       -1.860        -1.22  <2e-16 ***
ADE (average)              -1.414       -1.686        -1.14  <2e-16 ***
Prop. Mediated (average)    0.519        0.447         0.59  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 3046 


Simulations: 10000 

RemNA

df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rec_mem_neg_mood_lag))) 

# med model
med_fit_h1_neg_rec <- lme4::lmer(rec_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +  
                                   (1 |subjectcode), 
                                 data = df_medal_moderation)
# full model
out_fit_h1_neg_rec <- lme4::lmer(neg_mood ~ condition_dummy + rec_mem_neg_mood_lag +
                                  age + gender +education + 
                                  (1 + rec_mem_neg_mood_lag|subjectcode), 
                                data = df_medal_moderation)


#mediation analysis for control and remitted
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' , mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 2 ,   
                           sims=10000))

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              0.725        0.461         1.00  <2e-16 ***
ACME (treated)              0.725        0.461         1.00  <2e-16 ***
ADE (control)               0.791        0.482         1.10  <2e-16 ***
ADE (treated)               0.791        0.482         1.10  <2e-16 ***
Total Effect                1.516        1.114         1.92  <2e-16 ***
Prop. Mediated (control)    0.478        0.341         0.63  <2e-16 ***
Prop. Mediated (treated)    0.478        0.341         0.63  <2e-16 ***
ACME (average)              0.725        0.461         1.00  <2e-16 ***
ADE (average)               0.791        0.482         1.10  <2e-16 ***
Prop. Mediated (average)    0.478        0.341         0.63  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 3044 


Simulations: 10000 
#mediation analysis for control and depressed
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' ,  mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 3 ,
                           sims=10000))

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              1.821        1.473         2.19  <2e-16 ***
ACME (treated)              1.821        1.473         2.19  <2e-16 ***
ADE (control)               1.760        1.378         2.15  <2e-16 ***
ADE (treated)               1.760        1.378         2.15  <2e-16 ***
Total Effect                3.582        3.088         4.08  <2e-16 ***
Prop. Mediated (control)    0.509        0.431         0.59  <2e-16 ***
Prop. Mediated (treated)    0.509        0.431         0.59  <2e-16 ***
ACME (average)              1.821        1.473         2.19  <2e-16 ***
ADE (average)               1.760        1.378         2.15  <2e-16 ***
Prop. Mediated (average)    0.509        0.431         0.59  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 3044 


Simulations: 10000 

7.2 Moderated Mediation

RemPA-CurrPA


df_medal_moderation = df_medal_clean_remote %>% 
  filter(!(is.na(pos_mood) | is.na(rem_mem_pos_mood_lag))) 

#mediation first
med_fit_h2_pos_rem <- lme4::lmer(rem_mem_pos_mood_lag ~ condition_dummy + 
                                   age + gender + education + 
                                   (1 |subjectcode), 
                                 data =df_medal_moderation)
# full model
out_fit_h2_pos_rem <-lme4::lmer(pos_mood ~ condition_dummy + rem_mem_pos_mood_lag 
                                + age + gender +education +
                                  (1 + rem_mem_pos_mood_lag |subjectcode),
                                data=df_medal_moderation)
Warning: Model failed to converge with max|grad| = 0.00602257 (tol = 0.002, component 1)
#mediation analysis for control and remitted
med_pos_rem_rem <- mediation::mediate(med_fit_h2_pos_rem, out_fit_h2_pos_rem, 
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_pos_mood_lag')
summary(med_pos_rem_rem)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)             -0.663       -0.980        -0.36  <2e-16 ***
ACME (treated)             -0.663       -0.980        -0.36  <2e-16 ***
ADE (control)              -0.369       -0.680        -0.07   0.012 *  
ADE (treated)              -0.369       -0.680        -0.07   0.012 *  
Total Effect               -1.032       -1.453        -0.63  <2e-16 ***
Prop. Mediated (control)    0.643        0.420         0.91  <2e-16 ***
Prop. Mediated (treated)    0.643        0.420         0.91  <2e-16 ***
ACME (average)             -0.663       -0.980        -0.36  <2e-16 ***
ADE (average)              -0.369       -0.680        -0.07   0.012 *  
Prop. Mediated (average)    0.643        0.420         0.91  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 771 


Simulations: 1000 
#create new dataframes with only control remitted and only control depressed
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

#remitted group
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_pos_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV =condition_dummy, DV = pos_mood, M = rem_mem_pos_mood_lag, Mod = pos_mood_lag_rem) 
rbindlist(med_h2_pos_rem_rem$paths, idcol = T)
NA

RemNA - CurrNA

df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rem_mem_neg_mood_lag))) 

#mediation first
med_fit_h2_neg_rem <- lme4::lmer(rem_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +
                                   (1 |subjectcode),
                                 control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                 data =df_medal_moderation)

out_fit_h2_neg_rem <-lme4::lmer(neg_mood ~ condition_dummy + rem_mem_neg_mood_lag +
                                  age + gender + education +
                                  (1 + rem_mem_neg_mood_lag |subjectcode), 
                                control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                data=df_medal_moderation)

#mediation analysis for control and depressed
med_neg_rem_dep <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem,  
                                      mediator = 'rem_mem_neg_mood_lag',  treat = 'condition_dummy' , 
                                      control.value = 1, treat.value = 3 , sims=10000) 
summary(med_neg_rem_dep)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              2.350        1.881         2.85  <2e-16 ***
ACME (treated)              2.350        1.881         2.85  <2e-16 ***
ADE (control)               1.223        0.787         1.66  <2e-16 ***
ADE (treated)               1.223        0.787         1.66  <2e-16 ***
Total Effect                3.573        3.001         4.13  <2e-16 ***
Prop. Mediated (control)    0.657        0.557         0.76  <2e-16 ***
Prop. Mediated (treated)    0.657        0.557         0.76  <2e-16 ***
ACME (average)              2.350        1.881         2.85  <2e-16 ***
ADE (average)               1.223        0.787         1.66  <2e-16 ***
Prop. Mediated (average)    0.657        0.557         0.76  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 770 


Simulations: 10000 
#mediation analysis for control and remitted
med_neg_rem_rem <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem, treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_neg_mood_lag', sims=10000) 
summary(med_neg_rem_rem)

Causal Mediation Analysis 

Quasi-Bayesian Confidence Intervals

Mediator Groups: subjectcode 

Outcome Groups: subjectcode 

Output Based on Overall Averages Across Groups 

                         Estimate 95% CI Lower 95% CI Upper p-value    
ACME (control)              0.894        0.546         1.27  <2e-16 ***
ACME (treated)              0.894        0.546         1.27  <2e-16 ***
ADE (control)               0.424        0.142         0.70  0.0032 ** 
ADE (treated)               0.424        0.142         0.70  0.0032 ** 
Total Effect                1.318        0.886         1.76  <2e-16 ***
Prop. Mediated (control)    0.678        0.505         0.87  <2e-16 ***
Prop. Mediated (treated)    0.678        0.505         0.87  <2e-16 ***
ACME (average)              0.894        0.546         1.27  <2e-16 ***
ADE (average)               0.424        0.142         0.70  0.0032 ** 
Prop. Mediated (average)    0.678        0.505         0.87  <2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Sample Size Used: 770 


Simulations: 10000 
#create new dataframes with only control remitted and only control depressed
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

#remitted group
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_neg_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_rem$paths, idcol = T)

#depressed group
df_medal_con_dep$condition_dummy = build_contrast(df_medal_con_dep$condition, 'control', 'depressed')
med_h2_neg_rem_dep <- mdt_moderated(data = df_medal_con_dep, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_dep$paths,idcol = T)
NA

7.3 Context

asis_output(tab_model( (lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)), 
           (lmer(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
            (lmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
             (lmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)))$knitr)
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
boundary (singular) fit: see help('isSingular')
  pos mood cs pos mood cs neg mood cs neg mood cs
Predictors Estimates CI p Estimates CI p Estimates CI p Estimates CI p
(Intercept) 3.80 3.44 – 4.15 <0.001 5.35 4.64 – 6.05 <0.001 4.77 4.11 – 5.44 <0.001 4.77 4.11 – 5.44 <0.001
rec mem pos mood cs lag 0.49 0.44 – 0.54 <0.001
context location 0.01 -0.13 – 0.14 0.906 -0.25 -0.53 – 0.02 0.068 -0.19 -0.44 – 0.06 0.136 -0.19 -0.44 – 0.06 0.136
rec mem pos mood cs lag *
context location
0.01 -0.01 – 0.03 0.538
rem mem pos mood cs lag 0.33 0.21 – 0.45 <0.001
rem mem pos mood cs lag *
context location
0.05 0.01 – 0.10 0.023
rem mem neg mood cs lag 0.26 0.14 – 0.39 <0.001 0.26 0.14 – 0.39 <0.001
rem mem neg mood cs lag *
context location
0.04 -0.01 – 0.08 0.129 0.04 -0.01 – 0.08 0.129
Random Effects
σ2 1.28 1.48 1.45 1.45
τ00 0.00 subjectcode 0.00 subjectcode 0.00 subjectcode 0.00 subjectcode
N 191 subjectcode 188 subjectcode 188 subjectcode 188 subjectcode
Observations 3146 789 789 789
Marginal R2 / Conditional R2 0.257 / NA 0.163 / NA 0.100 / NA 0.100 / NA

7.4 SRET

In addition to the EMA, participants also completed the SRET. This is a lab based memory bias measure. We want to check whether our memory bias measures from real-life are linked to ones from the lab in this supplementary analysis.


library('haven')
Warning: package ‘haven’ was built under R version 4.2.3
sret_file = file.path("Z:/", "inbox", "transfer-2023-12-07-02-15-pm", 'MEDAL_pre and post quest and remote recall_workfile!!_for paper only var.sav')
sret_data = read_sav(sret_file) 
sret_data = sret_data %>% select(c(1,4)) %>% distinct()

df_medal_rec_cor1 = df_medal_clean %>%
  dplyr::select(subjectcode, pos_mood, rec_mem_pos_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr =  tryCatch(cor(rec_mem_pos_mood_lag, pos_mood), warning = function(e) NA),
                model_type = "PA_REC") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct()  %>% ungroup()


df_medal_rec_cor2 = df_medal_clean %>%
  dplyr::select(subjectcode, neg_mood, rec_mem_neg_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr =  tryCatch(cor(rec_mem_neg_mood_lag, neg_mood), warning = function(e) NA),
                model_type = "NA_REC") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct() %>% ungroup()


df_medal_rem_cor1 = df_medal_clean %>%
  dplyr::select(subjectcode, pos_mood, rem_mem_pos_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr = tryCatch(cor(rem_mem_pos_mood_lag, pos_mood), warning = function(e) NA),
                model_type = "PA_REM") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct()

df_medal_rem_cor2 = df_medal_clean %>%
  dplyr::select(subjectcode, neg_mood, rem_mem_neg_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr = tryCatch(cor(rem_mem_neg_mood_lag, neg_mood), warning = function(e) NA),
                model_type = "NA_REM") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct()


df_cors = rbindlist(list(df_medal_rec_cor1, df_medal_rec_cor2, df_medal_rem_cor1, df_medal_rem_cor2))

df_ref = merge(sret_data, df_cors, by.x = 'subject', by.y = 'subjectcode')
df_ref = df_ref %>% rename(SRET = SRET_BiasGotlib_neg) %>% dplyr::mutate(group = ifelse(subject<800, "remitted", 
                                                                    ifelse(subject>=900, "control", 'depressed')) )


for (mod_type in c("PA_REC", "PA_REM", "NA_REC", "NA_REM")){
  print(mod_type)
  print(summary(lm(SRET ~ corr*group, data = df_ref %>% filter(model_type == mod_type))))
}
[1] "PA_REC"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.42802 -0.18783 -0.03731  0.07877  0.96612 

Coefficients:
                    Estimate Std. Error t value Pr(>|t|)  
(Intercept)         0.032066   0.055434   0.578   0.5637  
corr                0.009979   0.109528   0.091   0.9275  
groupdepressed      0.207749   0.113678   1.828   0.0694 .
groupremitted       0.109699   0.078769   1.393   0.1655  
corr:groupdepressed 0.299086   0.203331   1.471   0.1431  
corr:groupremitted  0.145614   0.151633   0.960   0.3383  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2605 on 171 degrees of freedom
  (14 observations deleted due to missingness)
Multiple R-squared:  0.2304,    Adjusted R-squared:  0.2079 
F-statistic: 10.24 on 5 and 171 DF,  p-value: 1.343e-08

[1] "PA_REM"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.41550 -0.21434 -0.03978  0.12018  0.96298 

Coefficients:
                     Estimate Std. Error t value Pr(>|t|)    
(Intercept)          0.034360   0.044794   0.767 0.444147    
corr                 0.008142   0.065661   0.124 0.901462    
groupdepressed       0.365632   0.065231   5.605 8.57e-08 ***
groupremitted        0.192437   0.056284   3.419 0.000792 ***
corr:groupdepressed  0.007906   0.096231   0.082 0.934621    
corr:groupremitted  -0.025126   0.082845  -0.303 0.762047    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2675 on 165 degrees of freedom
  (17 observations deleted due to missingness)
Multiple R-squared:  0.2043,    Adjusted R-squared:  0.1802 
F-statistic: 8.475 on 5 and 165 DF,  p-value: 3.685e-07

[1] "NA_REC"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.45379 -0.17704 -0.03939  0.08131  0.96345 

Coefficients:
                    Estimate Std. Error t value Pr(>|t|)    
(Intercept)          0.03215    0.04477   0.718 0.473691    
corr                 0.01727    0.10065   0.172 0.863976    
groupdepressed       0.29548    0.08311   3.555 0.000489 ***
groupremitted        0.10471    0.06401   1.636 0.103709    
corr:groupdepressed  0.14637    0.15930   0.919 0.359496    
corr:groupremitted   0.18205    0.13735   1.325 0.186793    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2605 on 170 degrees of freedom
  (15 observations deleted due to missingness)
Multiple R-squared:  0.2323,    Adjusted R-squared:  0.2097 
F-statistic: 10.29 on 5 and 170 DF,  p-value: 1.249e-08

[1] "NA_REM"

Call:
lm(formula = SRET ~ corr * group, data = df_ref %>% filter(model_type == 
    mod_type))

Residuals:
     Min       1Q   Median       3Q      Max 
-0.43367 -0.17164 -0.04881  0.12752  0.94467 

Coefficients:
                    Estimate Std. Error t value Pr(>|t|)    
(Intercept)          0.04423    0.04128   1.071 0.285714    
corr                -0.02001    0.06467  -0.309 0.757395    
groupdepressed       0.34096    0.06427   5.305 3.86e-07 ***
groupremitted        0.19828    0.05408   3.667 0.000338 ***
corr:groupdepressed  0.08919    0.10774   0.828 0.409041    
corr:groupremitted  -0.07917    0.08544  -0.927 0.355620    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.2702 on 154 degrees of freedom
  (28 observations deleted due to missingness)
Multiple R-squared:  0.2103,    Adjusted R-squared:  0.1846 
F-statistic: 8.201 on 5 and 154 DF,  p-value: 6.88e-07

7.5 Relative Bias

Here we want to check for the relative NA/PA bias. So we extact the random effects from the models to get the slopes for each subject, and then compare the NA/PA slopes. This tells us if theres a bias between negative and positive memory.


# Get individual slopes
pos_rec_ref = as.data.frame(ranef(h1_pos_recent_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
pos_rem_ref = as.data.frame(ranef(h1_pos_remote_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rec_ref = as.data.frame(ranef(h1_neg_recent_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rem_ref = as.data.frame(ranef(h1_neg_remote_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
pos_rec_ref$mod = "PA_REC"
pos_rem_ref$mod = "PA_REM"
neg_rec_ref$mod = "NA_REC"
neg_rem_ref$mod = "NA_REM"

# Put together into a new data frame
df_ref = rbindlist(list(pos_rec_ref, pos_rem_ref, neg_rec_ref, neg_rem_ref))
df_ref = df_ref %>% 
  dplyr::mutate(sub = as.numeric(as.character(grp))) %>%
  dplyr::mutate(group = ifelse(sub <= 800, "remitted", 
                               ifelse(sub>=900, "control", 'depressed')) )

df_ref_rec = df_ref %>% filter(mod == 'PA_REC' | mod=='NA_REC')
df_ref_rem = df_ref %>% filter(mod == 'PA_REM' | mod=='NA_REM')
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rec))
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: condsd ~ mod * group + (1 | sub)
   Data: df_ref_rec

REML criterion at convergence: -1628.1

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-5.2099 -0.1327  0.0034  0.2821  2.0044 

Random effects:
 Groups   Name        Variance  Std.Dev.
 sub      (Intercept) 7.503e-05 0.008662
 Residual             5.543e-04 0.023543
Number of obs: 370, groups:  sub, 185

Fixed effects:
                           Estimate Std. Error         df t value Pr(>|t|)    
(Intercept)                0.174483   0.003446 358.898081  50.636  < 2e-16 ***
modPA_REC                 -0.151757   0.004573 182.000000 -33.182  < 2e-16 ***
groupdepressed            -0.031232   0.005055 358.898081  -6.178 1.76e-09 ***
groupremitted             -0.019837   0.004381 358.898081  -4.528 8.11e-06 ***
modPA_REC:groupdepressed   0.028693   0.006709 182.000000   4.277 3.06e-05 ***
modPA_REC:groupremitted    0.018448   0.005814 182.000000   3.173  0.00177 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
              (Intr) mdPA_REC grpdpr grprmt mdPA_REC:grpd
modPA_REC     -0.664                                     
groupdprssd   -0.682  0.452                              
groupremttd   -0.787  0.522    0.536                     
mdPA_REC:grpd  0.452 -0.682   -0.664 -0.356              
mdPA_REC:grpr  0.522 -0.787   -0.356 -0.664  0.536       
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rem))
Linear mixed model fit by REML. t-tests use Satterthwaite's method ['lmerModLmerTest']
Formula: condsd ~ mod * group + (1 | sub)
   Data: df_ref_rem

REML criterion at convergence: -3053.2

Scaled residuals: 
    Min      1Q  Median      3Q     Max 
-4.6559 -0.3184  0.0410  0.5489  1.7622 

Random effects:
 Groups   Name        Variance  Std.Dev.
 sub      (Intercept) 2.596e-06 0.001611
 Residual             9.576e-06 0.003094
Number of obs: 368, groups:  sub, 184

Fixed effects:
                           Estimate Std. Error         df t value Pr(>|t|)    
(Intercept)               4.997e-02  4.792e-04  3.463e+02 104.265  < 2e-16 ***
modPA_REM                -1.178e-02  6.011e-04  1.810e+02 -19.601  < 2e-16 ***
groupdepressed            6.628e-05  7.030e-04  3.463e+02   0.094   0.9249    
groupremitted            -1.069e-04  6.106e-04  3.463e+02  -0.175   0.8612    
modPA_REM:groupdepressed -3.553e-03  8.818e-04  1.810e+02  -4.029 8.22e-05 ***
modPA_REM:groupremitted  -1.525e-03  7.659e-04  1.810e+02  -1.991   0.0479 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Correlation of Fixed Effects:
              (Intr) mdPA_REM grpdpr grprmt mdPA_REM:grpd
modPA_REM     -0.627                                     
groupdprssd   -0.682  0.428                              
groupremttd   -0.785  0.492    0.535                     
mdPA_REM:grpd  0.428 -0.682   -0.627 -0.336              
mdPA_REM:grpr  0.492 -0.785   -0.336 -0.627  0.535       

8 Plots

First we set our theme and fix the data for the figures we want

# Theme
ggtheme = theme(text = element_text(size = 8), 
                panel.background = element_rect(fill = "transparent"), 
                panel.grid.major = element_line(color = "grey90"),
                panel.grid.minor = element_line(color = "grey100"),
                panel.border = element_rect(color = "grey80", fill = "transparent"))

# Estimate the SD
df_medal_clean2 = df_medal_clean %>% 
  mutate(pos_mood_sd = ifelse( (pos_mood_cs_lag_rec > (mean(pos_mood_cs_lag_rec, na.rm=T)+sd(pos_mood_cs_lag_rec, na.rm=T))), "+1SD",
                               ifelse( (pos_mood_cs_lag_rec < (mean(pos_mood_cs_lag_rec, na.rm=T)-sd(pos_mood_cs_lag_rec, na.rm =T))), "-1SD", "Mean"))) %>% 
    mutate(neg_mood_sd = ifelse( (neg_mood_cs_lag_rec > (mean(neg_mood_cs_lag_rec, na.rm=T)+sd(neg_mood_cs_lag_rec, na.rm=T))), "+1SD",
                               ifelse( (neg_mood_cs_lag_rec < (mean(neg_mood_cs_lag_rec, na.rm=T)-sd(neg_mood_cs_lag_rec, na.rm =T))), "-1SD", "Mean")))

# Fix factor levels
df_medal_clean2$Condition =  factor(df_medal_clean2$condition, levels=c('control', 'remitted', 'depressed' ), labels=c('Never-Depressed', 'Remitted', 'Depressed'))
df_medal_clean2$pos_mood_sd =  factor(df_medal_clean2$pos_mood_sd, levels=c('-1SD', 'Mean', '+1SD' ))
df_medal_clean2$neg_mood_sd =  factor(df_medal_clean2$neg_mood_sd, levels=c('-1SD', 'Mean', '+1SD' ))

8.0.1 PA-CUR-REC


# Plot
plot_pa_rec = ggplot(df_medal_clean2, aes(x = rec_mem_pos_mood_c_lag, y=pos_mood_c, color = pos_mood_sd, fill = pos_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC[PA], " (subject-centered a.u.)")), y = "PA (subject-centered a.u.)", color = bquote(CUR[PA]), fill = bquote(CUR[PA]) ) +
  ggtheme + 
  scale_color_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  facet_grid(.~Condition) 
 
plot_pa_rec

8.0.2 PA-CUR-REC


# Plot
plot_na_rec = 
  ggplot(df_medal_clean2, aes(x = rec_mem_neg_mood_c_lag, y=neg_mood_c, color = neg_mood_sd, fill = neg_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC['NA'], " (subject-centered a.u.)")), y = "NA (subject-centered a.u.)", color = bquote(CUR["NA"]), fill = bquote(CUR["NA"]) ) +
    scale_color_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  ggtheme + 
  facet_grid(.~Condition) 
plot_na_rec

8.0.3 Combined Plot for pub

ggarrange(NA, plot_pa_rec, NA,  plot_na_rec,
          widths=c(0.05, 1, 0.05, 1), ncol = 2, nrow = 2,
          labels=c("A", NA, "B"))
Warning: Cannot convert object of class logical into a grob.`geom_smooth()` using formula = 'y ~ x'Warning: Removed 4247 rows containing non-finite values (`stat_smooth()`).Warning: Cannot convert object of class logical into a grob.`geom_smooth()` using formula = 'y ~ x'Warning: Removed 4249 rows containing non-finite values (`stat_smooth()`).
ggsave("figures/fig1_threeway.pdf", device = 'pdf', dpi = 320, height = 6)
Saving 7.3 x 6 in image

---
title: 'Emotional Memory in Daily Life'
author:
- Noa Magusin
- Rayyan Tutunji
date: "`r Sys.Date()`"
output:
  html_notebook:
    fig_width: 8
    fig_height: 8
    toc: yes
    toc_float: yes
    number_sections: yes
    theme: flatly
  html_document:
    toc: yes
    df_print: paged
---

# Introduction

This notebook contains analyses from the MEDAL study, assessing memory bias in remitted and depressed individuals. The pre-registration for the main analyses, as well as a preprint version of the accompanying article can be found [here](https://osf.io/zufjs/) as well. The main aim of the current work was to examine how three groups (control, remitted, depressed) differ in terms of emotional memory dynamics in a real-life setting.

```{r message=TRUE, warning=FALSE}
#Load packages 
#renv::activate()
library(dplyr)
library(janitor)
library(readxl)
library(data.table)
library(ggplot2)
library(psych)
library(devtools)
library(pastecs)
library(cli)
library(lmerTest) #linear mixed models 
library(performance) #fits of residuals 
library(DescTools)
library(tidyr) # Separate trigger column
library(sjPlot) # for nice tables and interaction plots 
library(knitr) # to print the tables within notebook
library(foreach) #run parallel loops
library(parallel)
library(doParallel) #run parallel loops
library(ggpubr) # emmeans
library(mediation) #mediation analysis 
library(plotly) #for interactive plots
library(jtools) #for theme_apa
library(JSmediation) #moderated mediation
library(wesanderson) 
source("functions.R")
```

# Data

Here we load in our data and preprocess it in steps. We first need to separate the different surveys that were delivered to bring them into the same sampling times, merge in some descriptives we need as covariates, and estimate subject-centered and lagged variables for our models later. 


1. Load and clean data

```{r}
# Load the data
MEDAL_ESM_compleet <- read_excel("data/MEDAL_ESM_compleet.xlsx")
# Move variables around and clean names
MEDAL_ESM_compleet= MEDAL_ESM_compleet %>%
  rename(id='Movisens-ID') %>% 
  clean_names() %>%
  rename(subjectcode = 'deelnemersnr') %>% 
  relocate('subjectcode', .after = 'id')
df_medal_try = MEDAL_ESM_compleet

```

2. Split different EMA surveys we have

```{r}
# Sleep EMA Qs
df_medal_sleep = df_medal_try %>% 
  filter(form=='Slaap') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, 14)) 
# Recent EMA Qs
df_medal_recent = df_medal_try %>% 
  filter(form=='Recent') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rec')))
# Remote EMA Qs
df_medal_remote = df_medal_try %>%
  filter(form=='Remote') %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('rem')))
# Mastery EMA Qs
df_medal_mastery = df_medal_try %>%
  filter(form=='Mastery') %>% 
  dplyr::select(c(1, 2, 4, 5, 7, 9, contains('master')))
# Standard Qs
df_medal_standard = df_medal_try %>%
  filter(form=='Standaard')  %>%
  dplyr::select(c(1, 2, 4, 5, 7, 9, (-contains('mastery') & -contains('rem') & -contains('rec')))) %>% 
  dplyr::select(-c(7,13,14))

# merge sleep and standard
df_medal_merged = merge(df_medal_sleep, df_medal_standard, by=c('id', 'trigger_counter', 'subjectcode'), suffixes = c("_sleep", ""), all=T )

# merge to recent, remote and mastery
df_medal_merged= merge(df_medal_merged, df_medal_recent, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_recent'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_remote, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_remote'), all = T )
df_medal_merged = merge(df_medal_merged, df_medal_mastery, by=c('id', 'trigger_counter', 'subjectcode' ), suffixes = c('', '_mastery'), all = T )

# Fix start dates and times
df_medal_merged$form_start_date <- as.character(df_medal_merged$form_start_date)
df_medal_merged$form_start_date <- as.ITime(df_medal_merged$form_start_date)
df_medal_merged = df_medal_merged %>% 
  dplyr::mutate(form_start_date = ifelse(is.na(form_start_date), form_start_date_sleep, form_start_date))
```

3. Add variables on Education, Gender, and Age

```{r}
# Load in previous data for demographics (also contains EMA)
load("data/IPAQ_Maeve_Workspace.RData")
df_MEDAL <- df_MEDAL[-c(192), ]

# Select relevant demo info
df_MEDAL = df_MEDAL %>% 
  dplyr::select(1, 3, 5:7) %>% 
  rename('subjectcode' = 'subject', 'education' = 'Education3groups', 'n_episodes'='NumberEpisodes' )

# merge demo to full EMA
df_medal_merged =  merge(df_medal_merged, df_MEDAL, by = c('subjectcode'), all = T ) %>%
  clean_names() 

df_medal_merged$education = as.factor(df_medal_merged$education)
df_medal_merged$gender = as.factor(df_medal_merged$gender)

df_medal_merged = df_medal_merged %>% 
  relocate(c('id', 'gender', 'age', 'education'), .after = 'subjectcode') %>% 
  relocate ('age', .after = 'gender') %>% 
  relocate ('education', .after = 'age') %>% 
  relocate ('id', .before = 'subjectcode')

```

4. Categorize the subjectcode into the three conditions (Depressed, Remitted, Control (Never-depressed))

```{r}
df_medal_merged$condition = case_when(df_medal_merged$subjectcode >= 700 & df_medal_merged$subjectcode < 800 ~ "remitted",
                                      df_medal_merged$subjectcode >= 800 & df_medal_merged$subjectcode < 900 ~ "depressed", 
                                      df_medal_merged$subjectcode >= 900 ~ "control")
df_medal_merged = df_medal_merged %>% relocate ('condition', .after = 'subjectcode' )
df_medal_merged$condition <- as.factor(df_medal_merged$condition)
```

5. Calculate reaction time 

```{r}
df_medal_merged$rt_sleep = lubridate::as.difftime( df_medal_merged$form_finish_date_sleep - df_medal_merged$form_start_date_sleep)
```

6. Separate time from when trigger was sent

```{r}

df_medal_merged = df_medal_merged %>% 
  tidyr::separate(trigger, c('Random', 'Time', 'Trigger_Time_1', 'Trigger_Time_2'))  %>% 
  dplyr::mutate(trigger_time = paste(Trigger_Time_1, Trigger_Time_2, sep=':')) %>%#separate time from previous trigger
  dplyr::mutate(trigger = paste(Random, Time, trigger_time, sep=' ')) #bring back 'trigger' column 

#Turn numbers into time format
df_medal_merged$trigger_time <- as.ITime(df_medal_merged$trigger_time)
```

7. Find the trigger number per day

```{r}
#df_medal_merged$trigger_counter[is.na(df_medal_merged$trigger_time)] <- 1

df_medal_merged$trigger_number[is.na(df_medal_merged$trigger_time)] <- 1 
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("10:00:00"))& df_medal_merged$trigger_time < (as.ITime("12:00:00"))] <- 2
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("12:00:00"))& df_medal_merged$trigger_time < (as.ITime("14:00:00"))] <- 3
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("14:00:00"))& df_medal_merged$trigger_time < (as.ITime("16:00:00"))] <- 4
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("16:00:00"))& df_medal_merged$trigger_time < (as.ITime("18:00:00"))] <- 5
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("18:00:00"))& df_medal_merged$trigger_time < (as.ITime("20:00:00"))] <- 6
df_medal_merged$trigger_number[df_medal_merged$trigger_time >= (as.ITime("20:00:00"))& df_medal_merged$trigger_time < (as.ITime("23:00:01"))] <- 7

# df
df_medal_merged = df_medal_merged %>% 
  relocate ('trigger_number', .after = 'trigger_counter')%>% relocate('trigger_time', .before ='form_start_date') %>%
  relocate('trigger', .before ='trigger_time')
```

8. Final cleaning of the dataset
```{r}

#Create dataset where double morning list is removed 
df_medal_merged = df_medal_merged %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>%
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>%
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter)) %>% 
  dplyr::mutate(trigger_counter=ifelse(trigger_number==1 & lag(trigger_number)==1, lag(trigger_counter), trigger_counter))

# now we have the clean dataframe, remove duplicated coloumns
df_medal_clean = df_medal_merged %>%
  dplyr::group_by(subjectcode) %>% 
  distinct(trigger_counter, .keep_all = TRUE) %>% 
  dplyr::select (1:12, 17:60) %>% 
  dplyr::mutate(original_order = row_number()) %>% 
  relocate('original_order', .before = 'id')

#Set the order of the factor to control, remission, depression (for visual purposes)
df_medal_clean$condition <- factor(df_medal_clean$condition, levels=c('control', 'remitted', 'depressed' ), labels=c('control', 'remitted', 'depressed'))

# Create dummy variable 
df_medal_clean = df_medal_clean %>% 
  mutate(condition_dummy = case_when(condition == 'depressed'~ 3, condition == 'remitted'~ 2, condition == 'control' ~ 1))

df_medal_clean$condition_dummy = as.factor(df_medal_clean$condition_dummy)

#set weekday 
df_medal_clean$form_finish_date <- as.character(df_medal_clean$form_finish_date)
df_medal_clean$form_finish_date <- strptime(df_medal_clean$form_finish_date, format = '%Y-%m-%d %H:%M:%S')

df_medal_clean$finish_date <- as.Date(df_medal_clean$form_finish_date)
df_medal_clean$weekday <- as.integer(format(df_medal_clean$finish_date, '%w'))
df_medal_clean = df_medal_clean %>% relocate('weekday', .after = 'trigger_number')

#solve duplicate issue 
df_medal_duplicate = df_medal_clean %>% dplyr::group_by(subjectcode, weekday) %>% filter(duplicated(trigger_number))

  #since row 10 for subject 712 is a duplicate morning list
  df_medal_clean <- subset(df_medal_clean, !(subjectcode == 712 & original_order == 10))
  df_medal_clean = df_medal_clean %>% dplyr::group_by(subjectcode, weekday) %>% 
    mutate(temp = lead(trigger_number), 
           trigger_number = case_when(trigger_number == temp ~ (temp -1), TRUE ~ trigger_number)) %>%
    select(-temp)
  #Take the rows out that have 'NA'for the weekday since form wasn't finished and there is no data for PA, NA, or Memory
  df_medal_clean = df_medal_clean %>% filter(!is.na(weekday))
  df_medal_clean$original_order <- 1:nrow(df_medal_clean) 

# Create expanded df with all potential datapoints

df_medal_day = df_medal_clean %>% 
  dplyr::select('subjectcode', 'weekday', 'original_order') %>% 
  dplyr::mutate(weekday = as.numeric(weekday)) %>%
  dplyr::group_by(subjectcode) %>%
  dplyr:: distinct(weekday, .keep_all =T) %>% 
  dplyr::ungroup()

df_medal_day = df_medal_day %>% 
  slice(rep(1:n(), each = 7)) %>% 
  dplyr::mutate(trigger_number = rep(1:7, length.out=n())) %>% 
  dplyr::rename(order = original_order)


#merge with main dataframe
df_medal_clean =  merge(df_medal_day, df_medal_clean, by=c('subjectcode', 'trigger_number', 'weekday'), all= T) 
df_medal_clean = df_medal_clean %>% mutate(order = as.numeric(order))
df_medal_clean = arrange(df_medal_clean, order)
```

9. Centre, scale, and average Variables 

```{r warning=FALSE}

used_vars = c("neg_mood", "rec_mem_neg_mood", "rem_mem_neg_mood", "pos_mood", "rec_mem_pos_mood", "rem_mem_pos_mood")

#Centering & scaling 
df_medal_clean = df_medal_clean %>% 
  dplyr::group_by(subjectcode) %>%
  # Center and scale
  dplyr::mutate( across(used_vars, ~ (.x/10), .names = "{.col}" ),
                 across(used_vars, ~ mean(.x, na.rm=T), .names = "{.col}_m" ),
                 across(used_vars, ~ (.x - mean(.x, na.rm=T)), .names = "{.col}_c" ), 
                 across(paste0(used_vars, "_c"), ~ DescTools::Winsorize(.x, na.rm = T), .names = "{.col}")) %>%  
  ungroup() %>%
  # Rescale to positive
  mutate(across(c(paste0(used_vars,"_c")), ~ abs(min(.x, na.rm = T)) + 1 + .x, .names = "{.col}s"))

```


10. Lag variables 
```{r}
# Remote 

df_medal_clean = df_medal_clean %>%
  dplyr::group_by(subjectcode) %>%
  mutate(rem_mem_pos_mood_lag = lead(rem_mem_pos_mood, n=7, order_by=subjectcode),
         rem_mem_neg_mood_lag = lead(rem_mem_neg_mood, n=7, order_by=subjectcode),
         pos_mood_lag_rem = lead(pos_mood, n=7, order_by=subjectcode),
         neg_mood_lag_rem = lead(neg_mood, n=7, order_by=subjectcode), 
         rem_mem_pos_mood_cs_lag = lead(rem_mem_pos_mood_cs, n=7, order_by=subjectcode),
         rem_mem_neg_mood_cs_lag = lead(rem_mem_neg_mood_cs, n=7, order_by=subjectcode),
         pos_mood_cs_lag_rem = lead(pos_mood_cs, n=7, order_by=subjectcode),
         neg_mood_cs_lag_rem = lead(neg_mood_cs, n=7, order_by=subjectcode),
         rem_mem_pos_mood_c_lag = lead(rem_mem_pos_mood_c, n=7, order_by=subjectcode),
         rem_mem_neg_mood_c_lag = lead(rem_mem_neg_mood_c, n=7, order_by=subjectcode),
         pos_mood_c_lag_rem = lead(pos_mood_c, n=7, order_by=subjectcode),
         neg_mood_c_lag_rem = lead(neg_mood_c, n=7, order_by=subjectcode)) %>% 
  filter(!is.na(original_order)) # filter out the NA rows for the lagging of recent memory 

# Recent
df_medal_clean = df_medal_clean %>% 
  dplyr::group_by(subjectcode) %>%
  mutate(rec_mem_pos_mood_lag = lead(rec_mem_pos_mood, n=1, order_by=subjectcode),
         rec_mem_neg_mood_lag = lead(rec_mem_neg_mood, n=1, order_by=subjectcode),
         pos_mood_lag_rec = lead(pos_mood, n=1, order_by=subjectcode),
         neg_mood_lag_rec = lead(neg_mood, n=1, order_by=subjectcode),
         rec_mem_pos_mood_cs_lag = lead(rec_mem_pos_mood_cs, n=1, order_by=subjectcode),
         rec_mem_neg_mood_cs_lag = lead(rec_mem_neg_mood_cs, n=1, order_by=subjectcode),
         pos_mood_cs_lag_rec = lead(pos_mood_cs, n=1, order_by=subjectcode),
         neg_mood_cs_lag_rec = lead(neg_mood_cs, n=1, order_by=subjectcode),
         rec_mem_pos_mood_c_lag = lead(rec_mem_pos_mood_c, n=1, order_by=subjectcode),
         rec_mem_neg_mood_c_lag = lead(rec_mem_neg_mood_c, n=1, order_by=subjectcode),
         pos_mood_c_lag_rec = lead(pos_mood_c, n=1, order_by=subjectcode),
         neg_mood_c_lag_rec = lead(neg_mood_c, n=1, order_by=subjectcode)) %>% 
  ungroup()
  

```

# Descriptives {.tabset}

We first plot some general population descriptives, followed by some descriptives of the scales we use in the EMA weeks and compliance rates.

## Population Descriptives {-}
```{r fig.height=12, fig.width=20}
#Create Descriptives Tables
summary_table = df_medal_clean %>% distinct(subjectcode, .keep_all = TRUE) %>% group_by(condition) %>% summarise(
  'N' = n(),
  'Age \n M +/- SD' = paste0(round(mean(age, na.rm =T), 1), ' +/- ' , round(sd(age, na.rm =T), 1)),
  #'NA Age' = sum(is.na(age)), 
  '% Education Low' = mean(education == 0, na.rm = T)*100,
  '% Education Middle' = mean(education == 1, na.rm = T)*100,
  '% Education High' = mean(education == 2, na.rm = T)*100,
  #'NA Education' = sum(is.na(education)), 
  '% Female' = mean(gender ==1, na.rm = T)*100,
  #'NA Gender' = sum(is.na(gender))
)

#Compliance rates
df_medal_compliance_individual = df_medal_clean %>% group_by(condition, subjectcode) %>% summarise('ComplianceRate' = sum(!is.na(trigger_number))/42*100)

length(which(df_medal_compliance_individual$ComplianceRate <70))

df_medal_compliance = df_medal_clean %>% group_by(condition) %>% summarise('% Mean Compliance' = sum(!is.na(trigger_number))/(n_distinct(subjectcode) *42)*100) 


# Create a table
summary_table = summary_table %>% left_join(df_medal_compliance, by = 'condition')
rempsyc::nice_table(summary_table)


#create dataset with only unique subjectcode 
df_medal_distinct = df_medal_clean %>% distinct(subjectcode, .keep_all = TRUE)%>% select('subjectcode', 'condition', 'age', 'education', 'gender')

#Test for differences between groups 
  #Age
  df_medal_distinct %>% lm(age ~ condition, data=.) %>% summary()
  
  #education
  chisq.test(df_medal_distinct$education, df_medal_distinct$condition)
  
  #gender
  chisq.test(df_medal_distinct$gender, df_medal_distinct$condition)
  
#Create a Graph  
  #age
  boxplot_age <- ggplot(df_medal_distinct, aes(x=condition, y=age, fill=condition)) +
    geom_boxplot() +
    labs(x = 'Condition', y = 'Age', tag = 'C' ) +
    ggtitle('Age per Condition') +
    scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
    theme_apa() 
  ggplotly(boxplot_age)
    
    #education
    plot_educ <- ggplot(df_medal_distinct, aes(x=condition, fill = education)) +
      geom_bar(position = 'dodge') +
      labs(x='Condition', y= 'Proportion', tag = 'A' ) +
      ggtitle('Education Level per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Low' , 'Middle' , 'High'), name = 'Education')  +
      theme_apa() 
    ggplotly(plot_educ)
      
    #Gender
    plot_gender <- ggplot(df_medal_distinct, aes(x=condition, fill = gender)) +
      geom_bar(position = 'dodge') +
      labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
      ggtitle('Gender per Condition') +
      scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
      theme_apa() 
    ggplotly(plot_gender)
      
     plot_gender_2 <-  ggstatsplot::ggbarstats(data = df_medal_distinct, x = gender, y = condition) +
        labs(x = 'Condition', y = 'Proportion', tag = 'B' ) +
        ggtitle('Gender per Condition') +
        scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ), labels = c('Male', 'Female'), 'Gender')  +
        theme_apa() 
      ggplotly(plot_gender_2)

ggpubr::ggarrange(plot_educ, plot_gender, boxplot_age, nrow=2, ncol=2)    

    
```
## Positive Mood {-}

```{r}
describe.by(df_medal_clean$pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
``` 

Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


## Positive Recent Memory {-}

```{r}
describe.by(df_medal_clean$rec_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution
```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```

Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


## Positive Remote Memory {-}

```{r}
describe.by(df_medal_clean$rem_mem_pos_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```

Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_pos_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


## Negative Mood {-}

```{r}
describe.by(df_medal_clean$neg_mood, group=df_medal_clean$condition)%>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))

```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```
Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
ggplot(data= df_medal_clean, aes(x=neg_mood_cs)) + geom_histogram()+facet_grid(cols=vars(condition))
```


## Negative Recent Memory {-}

```{r}
describe.by(df_medal_clean$rec_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```
Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rec_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


## Negative Remote Memory {-}

```{r}
describe.by(df_medal_clean$rem_mem_neg_mood, group=df_medal_clean$condition) %>% rbindlist() %>% mutate(vars=c("cont", "rem", "dep"))
```

Distribution

```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood)) + geom_histogram()+facet_grid(cols=vars(condition))
```
Centred Distribution 
```{r}
ggplot(data= df_medal_clean, aes(x=rem_mem_neg_mood_c)) + geom_histogram()+facet_grid(cols=vars(condition))
```


# Main Effects {.tabset}
A simple linear regression analysis was done to look at the differences in positive and negative mood ratings per condition.

## Linear Mixed Models for PA and NA
```{r}
#positive mood 
positive_model = lmer(pos_mood ~ condition + (1|subjectcode), data = df_medal_clean)


#negative mood
negative_model = lmer(neg_mood ~ condition + (1|subjectcode), data = df_medal_clean)



asis_output(tab_model (positive_model, negative_model, 
                      show.se = T, show.df = T, show.aic = T, transform = NULL,  
                      show.stat = T, show.std = T, 
                      title = 'Condition Differences Mood Rating', dv.labels = c('PA Scaled',  'NA Scaled') )$knitr)
```

## Plot {-}
```{r message=FALSE, warning=FALSE}

#create boxplot for negative and positive affect for each group with significance levels 
combine_data = df_medal_clean %>%
  tidyr::pivot_longer(cols=c(pos_mood, neg_mood), names_to = 'mood_type', values_to = 'mood_rating') %>% 
  dplyr::mutate(mood_type = factor(mood_type, levels = c('pos_mood' , 'neg_mood')))

# Plot
lm_plot = ggplot(combine_data, aes(x=condition, y = mood_rating, fill= condition)) + 
  geom_boxplot() +
  labs(x ='Group', y ='Affect (scaled units)' ) +
  scale_x_discrete(labels = c('control' = 'Never Depressed', 'remitted' = 'Remitted', 'depressed' = 'Depressed' )) +
  facet_wrap( ~ mood_type, ncol =3, nrow = 2, labeller  = labeller(mood_type = c('pos_mood' = 'Positive Affect', 'neg_mood'  = 'Negative Affect'))) +
  geom_signif(comparisons = list(c('control', 'remitted'), c('control' , 'depressed'), c('remitted' , 'depressed' )), map_signif_level = T, y_position = c(15, 13, 11)) +
  scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  theme_apa() + 
  guides(fill=F)
ggplotly(lm_plot)
ggsave('plot_simple_regression.pdf', dpi = 320, width = 12, height = 8, path = "figures/")
```

## Follow-Up {-}
```{r}
emmeans::emmeans(positive_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
emmeans::emmeans(negative_model, pairwise ~ condition, pbkrtest.limit = 7800, lmerTest.limit=7800)
```



# Hypothesis 1: Independent Models  

In this section we test the first models from hypothesis 1 as stated in the pre-registraiton. These analyses look at the relationship between recent and remote emotional memory for both positive and negative affect, while also looking at group differences in these relaitonships based on depression status. 

```{r}
# set model families
model_families = c('gauss-link', "gauss-log", 'gauss-inv', 'Gamma-link', 'Gamma-log')
# detect cores for later
ncores = detectCores()-1
```

## Positive Affect {- .tabset}

First we run the positive affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects. 

### Recent {- }
```{r}
# Model equation
h1_post_recent_eq = "pos_mood_cs ~ 1 + condition*rec_mem_pos_mood_cs_lag + gender + age + education + (1 + rec_mem_pos_mood_cs_lag|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_pos_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'), .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_post_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_pos_recent_models,  dv.labels = names(h1_pos_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
```

```{r}
summary(h1_pos_recent_models[[2]])
plot_diagnostics(h1_pos_recent_models)
car::vif(h1_pos_recent_models[[2]]) #Vifs were inspected without interaction and deemed OK
```

##### Follow-up {-}

In the follow-up we look at the pairwise differences in the slopes, and the indiivuds
```{r}
emmeans::emtrends(h1_pos_recent_models[[2]], pairwise ~ condition, var='rec_mem_pos_mood_cs_lag', lmerTest.limit = 3500, pbkrtest.limit = 3500 )
```

```{r}
#create separate dataframes 
df_medal_clean_control = df_medal_clean %>% filter(condition == 'control')
df_medal_clean_remitted = df_medal_clean %>% filter(condition == 'remitted')
df_medal_clean_depressed = df_medal_clean %>% filter(condition == 'depressed') 


m1_pos_rec_control <-   lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education 
                     + (1  +  rec_mem_pos_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_pos_rec_remitted <-   lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education 
                     + (1 + rec_mem_pos_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_remitted,
                   control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_pos_rec_depressed <-   lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag + gender + age + education 
                     + (1  + rec_mem_pos_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_depressed,
                   control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_pos_rec_control, m1_pos_rec_remitted, m1_pos_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```


### Remote {-}
```{r}
# Model equation
h1_pos_remote_eq = "pos_mood_cs ~ 1 +  condition*rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_pos_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_pos_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_pos_remote_models, dv.labels = names(h1_pos_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)
```

```{r}
summary(h1_pos_remote_models[[2]])
plot_diagnostics(h1_pos_remote_models)
car::vif(h1_pos_remote_models[[2]]) #VIFs without interactions were fine
```


##### Follow-up {-}
```{r}
emmeans::emtrends(h1_pos_remote_models[[4]], pairwise ~ condition, var='rem_mem_pos_mood_cs_lag' )

```


```{r}

m1_pos_rem_control <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                     data=df_medal_clean_control,
                    control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_pos_rem_remitted <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                     data=df_medal_clean_remitted,
                     control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_pos_rem_depressed <- lmer(pos_mood_cs ~ 1 + rem_mem_pos_mood_cs_lag + gender + age + education + (1  + rem_mem_pos_mood_cs_lag|subjectcode), 
                       data=df_medal_clean_depressed,
                       control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_pos_rem_control, m1_pos_rem_remitted, m1_pos_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 

```




## Negative Affect {- .tabset}

We next run the negative affect models. For each of the recent and remote models, we check the best fitting model as stated in the pre-registration based on the AIC and the residuals. We then check whether mediation analyses are warranted, and run those. Within each tab below, we present the results and steps taken, as well as post-hoc analyses to examine the directionality of the effects

### Recent {-}
```{r}
# Model equation
h1_neg_recent_eq = "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag  + gender + age + education + (1 + rec_mem_neg_mood_cs_lag | subjectcode)"
# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_neg_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'), .errorhandling = 'remove') %dopar% {
  fit_all_mods(h1_neg_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_neg_recent_models,  dv.labels = names(h1_neg_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)

```


```{r}
summary(h1_neg_recent_models[[4]])
plot_diagnostics(h1_neg_recent_models)
car::vif(h1_neg_recent_models[[4]])
```



#### Follow-up {-}
```{r}
emmeans::emtrends(h1_neg_recent_models[[4]], pairwise ~ condition, var='rec_mem_neg_mood_cs_lag' )

emmeans::emmeans(h1_neg_recent_models[[4]], identity ~ rec_mem_neg_mood_cs_lag | condition )
```


```{r}

m1_neg_rec_control <- glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rec_remitted <-glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rec_depressed <-glmer(neg_mood_cs ~ 1 + rec_mem_neg_mood_cs_lag + gender + age + education + (0  + rec_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_neg_rec_control, m1_neg_rec_remitted, m1_neg_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```

### Remote {-}
```{r}
# Model equation
h1_neg_remote_eq = "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag  + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h1_neg_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h1_neg_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h1_neg_remote_models,   dv.labels = names(h1_neg_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)

```


```{r}
summary(h1_neg_remote_models[[2]])
plot_diagnostics(h1_neg_remote_models)
car::vif(h1_neg_remote_models[[2]])
```


#### Follow-up {-}
```{r}
emmeans::emtrends(h1_neg_remote_models[[2]], pairwise ~ condition, var='rem_mem_neg_mood_cs_lag' )

```

```{r}

m1_neg_rem_control <-  glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rem_remitted <- glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode),
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m1_neg_rem_depressed <-  glmer(neg_mood_cs ~ 1 + rem_mem_neg_mood_cs_lag + gender + age + education + (0  + rem_mem_neg_mood_cs_lag|subjectcode),
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity),
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m1_neg_rem_control, m1_neg_rem_remitted, m1_neg_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```



# Hypothesis 2: Current Affect Moderation 

For H2, we want to see whether current affective states moderate the ability to recall previous affective states. That is, do current negative affect feelings moderate the relationship between affect and recall? 

```{r}
#create separate dataframes for each group
df_medal_clean_control = df_medal_clean %>% filter(condition=='control')
df_medal_clean_remitted = df_medal_clean %>% filter(condition=='remitted')
df_medal_clean_depressed = df_medal_clean %>% filter(condition=='depressed')

#create a new dataframe for the remote variables, where the participants with the singular datapoints are removed 
df_medal_clean_remote = df_medal_clean %>% filter (!(subjectcode == 822 | subjectcode == 906 | subjectcode == 717))

#create separate dataframes per condition 
df_medal_clean_remote_control = df_medal_clean_remote %>% filter(condition=='control')
df_medal_clean_remote_remitted = df_medal_clean_remote %>% filter(condition=='remitted')
df_medal_clean_remote_depressed = df_medal_clean_remote %>% filter(condition=='depressed')
```

## Positive Affect {- .tabset}

### Recent {-}
An lmer model was fitted with gaussian identity, log & inverse as well as Gamma log, inverse, and identity. The models, however, were not a good fit and the gaussian identity model coped with singularity. Hence, a normal linear regression model was fitted. 
```{r fig.height=20, fig.width=15}
h2_pos_recent_eq = "pos_mood_cs ~ condition*rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_pos_mood_cs_lag + pos_mood_cs_lag_rec|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_pos_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_pos_recent_models,  dv.labels = names(h2_pos_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)


```

Due to singularity this was determined to be the best fit for the moddel

```{r}
summary(h2_pos_recent_models[[2]])
plot_diagnostics(h2_pos_recent_models)
car::vif(h2_pos_recent_models[[2]])
```



#### Follow-up {-}
```{r}
m2_pos_rec_control <- lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                   data=df_medal_clean_control)

m2_pos_rec_remitted <- lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                   data=df_medal_clean_remitted)

m2_pos_rec_depressed <-  lm(pos_mood_cs ~ 1 + rec_mem_pos_mood_cs_lag*pos_mood_cs_lag_rec + gender + age + education,
                   data=df_medal_clean_depressed)

asis_output(tab_model(m2_pos_rec_control, m2_pos_rec_remitted, m2_pos_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr)
```


### Remote {- .tabset}

An lmer model was fitted with gaussian identity, log & inverse as well as Gamma log, inverse, and identity. The models, however, were not a good fit and the gaussian identity model coped with singularity. Hence, a normal linear regression model was fitted. 
```{r}
h2_pos_remote_eq = "pos_mood_cs ~ condition*rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem + gender + age + education + (1 | subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_pos_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_pos_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_pos_remote_models,  dv.labels = names(h2_pos_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)


```


Due to singularity a regular linear model was fitted.
```{r}
summary(h2_pos_remote_models[[2]])
plot_diagnostics(h2_pos_remote_models)
car::vif(h2_pos_remote_models[[2]])

```


#### Follow-up {-}
```{r}

m2_pos_rem_control <- lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education, 
                   data=df_medal_clean_remote_control)

m2_pos_rem_remitted <- lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education,  
                   data=df_medal_clean_remote_remitted)

m2_pos_rem_depressed <-  lm(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*pos_mood_cs_lag_rem  + gender + age + education, 
                   data=df_medal_clean_remote_depressed)

asis_output(tab_model(m2_pos_rem_control, m2_pos_rem_remitted, m2_pos_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```



## Negative Affect {- .tabset}

###  Recent {-}
```{r}
h2_neg_recent_eq = "neg_mood_cs ~ condition*rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + (1 + rec_mem_neg_mood_cs_lag + neg_mood_cs_lag_rec|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_neg_recent_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_recent_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_neg_recent_models, dv.labels = names(h2_neg_recent_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)

```

Check Final Model
```{r }
summary(h2_neg_recent_models[[4]])
plot_diagnostics(h2_neg_recent_models) 
car::vif(h2_neg_recent_models[[4]])

```




#### Follow-up {-}
```{r}

m2_neg_rec_control <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + 
                              gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_remitted <- glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode), 
                   data=df_medal_clean_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rec_depressed <-  glmer(neg_mood_cs ~ rec_mem_neg_mood_cs_lag*neg_mood_cs_lag_rec + gender + age + education + 
                              (1  + rec_mem_neg_mood_cs_lag | subjectcode) + (1 +  neg_mood_cs_lag_rec|subjectcode),  
                   data=df_medal_clean_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m2_neg_rec_control, m2_neg_rec_remitted, m2_neg_rec_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```




### Remote {-}
```{r}
h2_neg_remote_eq = "neg_mood_cs ~ condition*rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem + gender + age + education + (1 + rem_mem_neg_mood_cs_lag + neg_mood_cs_lag_rem|subjectcode)"

# set clusters
cl = makeCluster(ncores)
registerDoParallel(cl)

# run parallel
h2_neg_remote_models = foreach(family = model_families, .combine = 'c', .packages = c('lmerTest'),  .errorhandling = "remove") %dopar% {
  fit_all_mods(h2_neg_remote_eq, df_medal_clean, family) }
stopCluster(cl)

# print the models into a single table
asis_output( tab_model(h2_neg_remote_models, dv.labels = names(h2_neg_remote_models), 
                       show.icc = T, show.aic = T, transform = NULL, show.se = T, digits = 3)$knitr)

```

Check Final Model
```{r}
summary(h2_neg_remote_models[[4]])
plot_diagnostics(h2_neg_remote_models)
car::vif(h2_neg_remote_models[[4]])
```




#### Follow-up {-}
```{r}

m2_neg_rem_control <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_control,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_remitted <- glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode),
                   data=df_medal_clean_remote_remitted,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

m2_neg_rem_depressed <-  glmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*neg_mood_cs_lag_rem  + gender + age + education
                            + (1  +  rem_mem_neg_mood_cs_lag|subjectcode) + (1 + neg_mood_cs_lag_rem|subjectcode), 
                   data=df_medal_clean_remote_depressed,
                   family = Gamma(link=identity), 
                   control = glmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)))

asis_output(tab_model(m2_neg_rem_control, m2_neg_rem_remitted, m2_neg_rem_depressed, show.se = T, show.aic = T, transform = NULL, dv.labels = c('control', 'remitted', 'depressed'))$knitr) 
```


# Exploratory Analyses

## Mediation Models {.tabset}

Following the tests we ran for hypothesis 1, we would also like to see whether or not mediation effects exist. 

### RecPA {-}

```{r}

#create a new dataframe for the remote variables, where the participants with the singular datapoints are removed 
df_medal_moderation =  df_medal_clean %>% 
  filter (!(subjectcode == 822 | subjectcode == 906 | subjectcode == 717)) %>% 
  filter(!(is.na(pos_mood) | is.na(rec_mem_pos_mood_lag))) 

# med model
med_fit_h1_pos_rec <- lme4::lmer(rec_mem_pos_mood_lag ~ condition_dummy +
                                   age + gender + education + 
                                   (1 |subjectcode),
                                 data = df_medal_moderation)
# full model
out_fit_h1_pos_rec <-lme4::lmer(pos_mood ~ condition_dummy + rec_mem_pos_mood_lag +
                                  age + gender +education +
                                  (1 + rec_mem_pos_mood_lag |subjectcode), 
                                data = df_medal_moderation)

#mediation analysis for control and depressed
med_pos_rec_dep <- mediation::mediate(med_fit_h1_pos_rec, out_fit_h1_pos_rec,
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 3 ,  mediator = 'rec_mem_pos_mood_lag', 
                                      sims=10000)
summary(med_pos_rec_dep)

```


### RemNA {-}
```{r}
df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rec_mem_neg_mood_lag))) 

# med model
med_fit_h1_neg_rec <- lme4::lmer(rec_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +  
                                   (1 |subjectcode), 
                                 data = df_medal_moderation)
# full model
out_fit_h1_neg_rec <- lme4::lmer(neg_mood ~ condition_dummy + rec_mem_neg_mood_lag +
                                  age + gender +education + 
                                  (1 + rec_mem_neg_mood_lag|subjectcode), 
                                data = df_medal_moderation)


#mediation analysis for control and remitted
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' , mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 2 ,   
                           sims=10000))

#mediation analysis for control and depressed
summary(mediation::mediate(med_fit_h1_neg_rec, out_fit_h1_neg_rec, 
                           treat = 'condition_dummy' ,  mediator = 'rec_mem_neg_mood_lag', control.value = 1, treat.value = 3 ,
                           sims=10000))

```

## Moderated Mediation {.tabset}

### RemPA-CurrPA {-}
```{r}

df_medal_moderation = df_medal_clean_remote %>% 
  filter(!(is.na(pos_mood) | is.na(rem_mem_pos_mood_lag))) 

#mediation first
med_fit_h2_pos_rem <- lme4::lmer(rem_mem_pos_mood_lag ~ condition_dummy + 
                                   age + gender + education + 
                                   (1 |subjectcode), 
                                 data =df_medal_moderation)
# full model
out_fit_h2_pos_rem <-lme4::lmer(pos_mood ~ condition_dummy + rem_mem_pos_mood_lag 
                                + age + gender +education +
                                  (1 + rem_mem_pos_mood_lag |subjectcode),
                                data=df_medal_moderation)


#mediation analysis for control and remitted
med_pos_rem_rem <- mediation::mediate(med_fit_h2_pos_rem, out_fit_h2_pos_rem, 
                                      treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_pos_mood_lag')
summary(med_pos_rem_rem)

#create new dataframes with only control remitted and only control depressed
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

#remitted group
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_pos_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV =condition_dummy, DV = pos_mood, M = rem_mem_pos_mood_lag, Mod = pos_mood_lag_rem) 
rbindlist(med_h2_pos_rem_rem$paths, idcol = T)

```

### RemNA - CurrNA {-}
```{r paged.print=TRUE}
df_medal_moderation = df_medal_clean_remote %>% filter(!(is.na(neg_mood) | is.na(rem_mem_neg_mood_lag))) 

#mediation first
med_fit_h2_neg_rem <- lme4::lmer(rem_mem_neg_mood_lag ~ condition_dummy +
                                   age + gender + education +
                                   (1 |subjectcode),
                                 control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                 data =df_medal_moderation)

out_fit_h2_neg_rem <-lme4::lmer(neg_mood ~ condition_dummy + rem_mem_neg_mood_lag +
                                  age + gender + education +
                                  (1 + rem_mem_neg_mood_lag |subjectcode), 
                                control = lmerControl(calc.derivs = F, optimizer='bobyqa', optCtrl=list(maxfun=1e6)),
                                data=df_medal_moderation)

#mediation analysis for control and depressed
med_neg_rem_dep <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem,  
                                      mediator = 'rem_mem_neg_mood_lag',  treat = 'condition_dummy' , 
                                      control.value = 1, treat.value = 3 , sims=10000) 
summary(med_neg_rem_dep)

#mediation analysis for control and remitted
med_neg_rem_rem <- mediation::mediate(med_fit_h2_neg_rem, out_fit_h2_neg_rem, treat = 'condition_dummy' , control.value = 1, treat.value = 2 ,  mediator = 'rem_mem_neg_mood_lag', sims=10000) 
summary(med_neg_rem_rem)

#create new dataframes with only control remitted and only control depressed
df_medal_con_rem = df_medal_clean %>% filter(condition=='control' | condition == 'remitted')
df_medal_con_dep = df_medal_clean %>% filter(condition=='control' | condition == 'depressed')

#remitted group
df_medal_con_rem$condition_dummy = build_contrast(df_medal_con_rem$condition, 'control', 'remitted')
med_h2_neg_rem_rem <- mdt_moderated(data = df_medal_con_rem, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_rem$paths, idcol = T)

#depressed group
df_medal_con_dep$condition_dummy = build_contrast(df_medal_con_dep$condition, 'control', 'depressed')
med_h2_neg_rem_dep <- mdt_moderated(data = df_medal_con_dep, IV = condition_dummy, DV = neg_mood, M = rem_mem_neg_mood_lag, Mod = neg_mood_lag_rem) 
rbindlist(med_h2_neg_rem_dep$paths,idcol = T)
  
```

## Context

```{r}
asis_output(tab_model( (lmer(pos_mood_cs ~ rec_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)), 
           (lmer(pos_mood_cs ~ rem_mem_pos_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
            (lmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)),
             (lmer(neg_mood_cs ~ rem_mem_neg_mood_cs_lag*context_location + (1  | subjectcode), data = df_medal_clean)))$knitr)

```

## SRET

In addition to the EMA, participants also completed the SRET. This is a lab based memory bias measure. We want to check whether our memory bias measures from real-life are linked to ones from the lab in this supplementary analysis. 

```{r}

library('haven')
sret_file = file.path("Z:/", "inbox", "transfer-2023-12-07-02-15-pm", 'MEDAL_pre and post quest and remote recall_workfile!!_for paper only var.sav')
sret_data = read_sav(sret_file) 
sret_data = sret_data %>% select(c(1,4)) %>% distinct()

df_medal_rec_cor1 = df_medal_clean %>%
  dplyr::select(subjectcode, pos_mood, rec_mem_pos_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr =  tryCatch(cor(rec_mem_pos_mood_lag, pos_mood), warning = function(e) NA),
                model_type = "PA_REC") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct()  %>% ungroup()


df_medal_rec_cor2 = df_medal_clean %>%
  dplyr::select(subjectcode, neg_mood, rec_mem_neg_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr =  tryCatch(cor(rec_mem_neg_mood_lag, neg_mood), warning = function(e) NA),
                model_type = "NA_REC") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct() %>% ungroup()


df_medal_rem_cor1 = df_medal_clean %>%
  dplyr::select(subjectcode, pos_mood, rem_mem_pos_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr = tryCatch(cor(rem_mem_pos_mood_lag, pos_mood), warning = function(e) NA),
                model_type = "PA_REM") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct()

df_medal_rem_cor2 = df_medal_clean %>%
  dplyr::select(subjectcode, neg_mood, rem_mem_neg_mood_lag) %>%
  na.omit() %>% 
  dplyr::group_by(subjectcode) %>%
  dplyr::mutate(corr = tryCatch(cor(rem_mem_neg_mood_lag, neg_mood), warning = function(e) NA),
                model_type = "NA_REM") %>% 
  dplyr::select(subjectcode, corr, model_type) %>% 
  dplyr::distinct()


df_cors = rbindlist(list(df_medal_rec_cor1, df_medal_rec_cor2, df_medal_rem_cor1, df_medal_rem_cor2))

df_ref = merge(sret_data, df_cors, by.x = 'subject', by.y = 'subjectcode')
df_ref = df_ref %>% rename(SRET = SRET_BiasGotlib_neg) %>% dplyr::mutate(group = ifelse(subject<800, "remitted", 
                                                                    ifelse(subject>=900, "control", 'depressed')) )


for (mod_type in c("PA_REC", "PA_REM", "NA_REC", "NA_REM")){
  print(mod_type)
  print(summary(lm(SRET ~ corr*group, data = df_ref %>% filter(model_type == mod_type))))
}


```


## Relative Bias

Here we want to check for the relative NA/PA bias. So we extact the random effects from the models to get the slopes for each subject, and then compare the NA/PA slopes. This tells us if theres a bias between negative and positive memory.

```{r}

# Get individual slopes
pos_rec_ref = as.data.frame(ranef(h1_pos_recent_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
pos_rem_ref = as.data.frame(ranef(h1_pos_remote_models[[2]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rec_ref = as.data.frame(ranef(h1_neg_recent_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
neg_rem_ref = as.data.frame(ranef(h1_neg_remote_models[[4]])) %>% filter(term!='(Intercept)') %>% select(c(3:5))
pos_rec_ref$mod = "PA_REC"
pos_rem_ref$mod = "PA_REM"
neg_rec_ref$mod = "NA_REC"
neg_rem_ref$mod = "NA_REM"

# Put together into a new data frame
df_ref = rbindlist(list(pos_rec_ref, pos_rem_ref, neg_rec_ref, neg_rem_ref))
df_ref = df_ref %>% 
  dplyr::mutate(sub = as.numeric(as.character(grp))) %>%
  dplyr::mutate(group = ifelse(sub <= 800, "remitted", 
                               ifelse(sub>=900, "control", 'depressed')) )

df_ref_rec = df_ref %>% filter(mod == 'PA_REC' | mod=='NA_REC')
df_ref_rem = df_ref %>% filter(mod == 'PA_REM' | mod=='NA_REM')
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rec))
summary(lmer(condsd ~ mod*group + (1|sub), data = df_ref_rem))
```



# Plots

First we set our theme and fix the data for the figures we want
```{r}
# Theme
ggtheme = theme(text = element_text(size = 8), 
                panel.background = element_rect(fill = "transparent"), 
                panel.grid.major = element_line(color = "grey90"),
                panel.grid.minor = element_line(color = "grey100"),
                panel.border = element_rect(color = "grey80", fill = "transparent"))

# Estimate the SD
df_medal_clean2 = df_medal_clean %>% 
  mutate(pos_mood_sd = ifelse( (pos_mood_cs_lag_rec > (mean(pos_mood_cs_lag_rec, na.rm=T)+sd(pos_mood_cs_lag_rec, na.rm=T))), "+1SD",
                               ifelse( (pos_mood_cs_lag_rec < (mean(pos_mood_cs_lag_rec, na.rm=T)-sd(pos_mood_cs_lag_rec, na.rm =T))), "-1SD", "Mean"))) %>% 
    mutate(neg_mood_sd = ifelse( (neg_mood_cs_lag_rec > (mean(neg_mood_cs_lag_rec, na.rm=T)+sd(neg_mood_cs_lag_rec, na.rm=T))), "+1SD",
                               ifelse( (neg_mood_cs_lag_rec < (mean(neg_mood_cs_lag_rec, na.rm=T)-sd(neg_mood_cs_lag_rec, na.rm =T))), "-1SD", "Mean")))

# Fix factor levels
df_medal_clean2$Condition =  factor(df_medal_clean2$condition, levels=c('control', 'remitted', 'depressed' ), labels=c('Never-Depressed', 'Remitted', 'Depressed'))
df_medal_clean2$pos_mood_sd =  factor(df_medal_clean2$pos_mood_sd, levels=c('-1SD', 'Mean', '+1SD' ))
df_medal_clean2$neg_mood_sd =  factor(df_medal_clean2$neg_mood_sd, levels=c('-1SD', 'Mean', '+1SD' ))


```

### PA-CUR-REC
```{r}

# Plot
plot_pa_rec = ggplot(df_medal_clean2, aes(x = rec_mem_pos_mood_c_lag, y=pos_mood_c, color = pos_mood_sd, fill = pos_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC[PA], " (subject-centered a.u.)")), y = "PA (subject-centered a.u.)", color = bquote(CUR[PA]), fill = bquote(CUR[PA]) ) +
  ggtheme + 
  scale_color_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  facet_grid(.~Condition) 
 
plot_pa_rec
```

### PA-CUR-REC

```{r}

# Plot
plot_na_rec = 
  ggplot(df_medal_clean2, aes(x = rec_mem_neg_mood_c_lag, y=neg_mood_c, color = neg_mood_sd, fill = neg_mood_sd)) +
  geom_smooth(method = 'lm', alpha = 0.15) +
  labs( x = bquote(paste(REC['NA'], " (subject-centered a.u.)")), y = "NA (subject-centered a.u.)", color = bquote(CUR["NA"]), fill = bquote(CUR["NA"]) ) +
    scale_color_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  scale_fill_manual(values = wesanderson::wes_palette(n=3, name='GrandBudapest2' ))  +
  ggtheme + 
  facet_grid(.~Condition) 
plot_na_rec
```

### Combined Plot for pub
```{r}
ggarrange(NA, plot_pa_rec, NA,  plot_na_rec,
          widths=c(0.05, 1, 0.05, 1), ncol = 2, nrow = 2,
          labels=c("A", NA, "B"))
ggsave("figures/fig1_threeway.pdf", device = 'pdf', dpi = 320, height = 6)

```






